/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (c) 2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
#define MIN_FREQUENCY_DOWN_DIFFERENTIAL		(1)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
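/*
 * Example: for a CPU with a 10 uS transition latency,
 * cpufreq_governor_dbs() picks a default sampling rate of
 * 10 * LATENCY_MULTIPLIER = 10000 uS (10 ms), clamped from below by
 * min_sampling_rate and MIN_LATENCY_MULTIPLIER.
 */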
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

#define POWERSAVE_BIAS_MAXLEVEL			(1000)
#define POWERSAVE_BIAS_MINLEVEL			(-1000)

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	unsigned int prev_load;
	unsigned int max_load;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info);
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable and dbs_info during start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *dbs_wq;

struct dbs_work_struct {
	struct work_struct work;
	unsigned int cpu;
};

static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work);

struct dbs_sync_work_struct {
	struct work_struct work;
	unsigned int src_cpu;
	unsigned int targ_cpu;
};

static DEFINE_PER_CPU(struct dbs_sync_work_struct, dbs_sync_work);

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int up_threshold_multi_core;
	unsigned int down_differential;
	unsigned int down_differential_multi_core;
	unsigned int optimal_freq;
	unsigned int up_threshold_any_cpu_load;
	unsigned int sync_freq;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
	.up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
	.ignore_nice = 0,
	.powersave_bias = 0,
	.sync_freq = 0,
	.optimal_freq = 0,
};

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
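/*
 * Worked example (illustrative values): with powersave_bias = 100 (10%)
 * and a requested target of 1000 MHz, freq_avg = 1000 - 100 = 900 MHz.
 * If the table only has 800 MHz and 1000 MHz steps, then freq_lo = 800,
 * freq_hi = 1000 and jiffies_hi = (900 - 800) / (1000 - 800) = half of
 * the sampling interval, so the time-averaged frequency is about 900 MHz.
 */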
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	int freq_reduc;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static int ondemand_powersave_bias_setspeed(struct cpufreq_policy *policy,
					    struct cpufreq_policy *altpolicy,
					    int level)
{
	if (level == POWERSAVE_BIAS_MAXLEVEL) {
		/* maximum powersave; set to lowest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->min : policy->min,
			CPUFREQ_RELATION_L);
		return 1;
	} else if (level == POWERSAVE_BIAS_MINLEVEL) {
		/* minimum powersave; set to highest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->max : policy->max,
			CPUFREQ_RELATION_H);
		return 1;
	}
	return 0;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(up_threshold_multi_core, up_threshold_multi_core);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(optimal_freq, optimal_freq);
show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
show_one(sync_freq, sync_freq);

static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required,
 * then the governor may change the sampling rate too late; up to 1 second
 * later. Thus, if we are reducing the sampling rate, we need to make the
 * new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				     = max(new_rate, min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			queue_delayed_work_on(dbs_info->cpu, dbs_wq,
				&dbs_info->work, usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
}
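/*
 * Usage note: these tunables live under the "ondemand" attribute group on
 * cpufreq_global_kobject, which on a typical build is exposed as
 * /sys/devices/system/cpu/cpufreq/ondemand/; e.g. writing 50000 to
 * sampling_rate there ends up in update_sampling_rate(50000).
 */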

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}

static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.sync_freq = input;
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.optimal_freq = input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_up_threshold_multi_core(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_multi_core = input;
	return count;
}

static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_any_cpu_load = input;
	return count;
}

static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold ||
			input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
		return -EINVAL;
	}

	dbs_tuners_ins.down_differential = input;

	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	int input = 0;
	int bypass = 0;
	int ret, cpu, reenable_timer, j;
	struct cpu_dbs_info_s *dbs_info;

	struct cpumask cpus_timer_done;
	cpumask_clear(&cpus_timer_done);

	ret = sscanf(buf, "%d", &input);

	if (ret != 1)
		return -EINVAL;

	if (input >= POWERSAVE_BIAS_MAXLEVEL) {
		input = POWERSAVE_BIAS_MAXLEVEL;
		bypass = 1;
	} else if (input <= POWERSAVE_BIAS_MINLEVEL) {
		input = POWERSAVE_BIAS_MINLEVEL;
		bypass = 1;
	}

	if (input == dbs_tuners_ins.powersave_bias) {
		/* no change */
		return count;
	}

	reenable_timer = ((dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MAXLEVEL) ||
				(dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MINLEVEL));

	dbs_tuners_ins.powersave_bias = input;

	mutex_lock(&dbs_mutex);
	get_online_cpus();

	if (!bypass) {
		if (reenable_timer) {
			/* reinstate dbs timer */
			for_each_online_cpu(cpu) {
				if (lock_policy_rwsem_write(cpu) < 0)
					continue;

				dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

				for_each_cpu(j, &cpus_timer_done) {
					if (!dbs_info->cur_policy) {
						pr_err("Dbs policy is NULL\n");
						goto skip_this_cpu;
					}
					if (cpumask_test_cpu(j, dbs_info->
							cur_policy->cpus))
						goto skip_this_cpu;
				}

				cpumask_set_cpu(cpu, &cpus_timer_done);
				if (dbs_info->cur_policy) {
					/* restart dbs timer */
					dbs_timer_init(dbs_info);
				}
skip_this_cpu:
				unlock_policy_rwsem_write(cpu);
			}
		}
		ondemand_powersave_bias_init();
	} else {
		/* running at maximum or minimum frequencies; cancel
		   dbs timer as periodic load sampling is not necessary */
		for_each_online_cpu(cpu) {
			if (lock_policy_rwsem_write(cpu) < 0)
				continue;

			dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

			for_each_cpu(j, &cpus_timer_done) {
				if (!dbs_info->cur_policy) {
					pr_err("Dbs policy is NULL\n");
					goto skip_this_cpu_bypass;
				}
				if (cpumask_test_cpu(j, dbs_info->
						cur_policy->cpus))
					goto skip_this_cpu_bypass;
			}

			cpumask_set_cpu(cpu, &cpus_timer_done);

			if (dbs_info->cur_policy) {
				/* cpu using ondemand, cancel dbs timer */
				mutex_lock(&dbs_info->timer_mutex);
				dbs_timer_exit(dbs_info);

				ondemand_powersave_bias_setspeed(
					dbs_info->cur_policy,
					NULL,
					input);

				mutex_unlock(&dbs_info->timer_mutex);
			}
skip_this_cpu_bypass:
			unlock_policy_rwsem_write(cpu);
		}
	}

	put_online_cpus();
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_rw(up_threshold_multi_core);
define_one_global_rw(optimal_freq);
define_one_global_rw(up_threshold_any_cpu_load);
define_one_global_rw(sync_freq);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&down_differential.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	&up_threshold_multi_core.attr,
	&optimal_freq.attr,
	&up_threshold_any_cpu_load.attr,
	&sync_freq.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	/* Extrapolated load of this CPU */
	unsigned int load_at_max_freq = 0;
	unsigned int max_load_freq;
	/* Current load across this CPU */
	unsigned int cur_load = 0;
	unsigned int max_load_other_cpu = 0;
	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default). If it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency
	 */
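	/*
	 * Illustrative numbers with the default tunables: up_threshold = 80
	 * and down_differential = 10, so a CPU running at 1000 MHz ramps to
	 * policy->max once max_load_freq > 80 * 1000, and is lowered to
	 * roughly freq_next = max_load_freq / (80 - 10) once
	 * max_load_freq < (80 - 10) * 1000.
	 */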

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		cur_load = 100 * (wall_time - idle_time) / wall_time;
		j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load);
		j_dbs_info->prev_load = cur_load;
		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = cur_load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		if (j == policy->cpu)
			continue;

		if (max_load_other_cpu < j_dbs_info->max_load)
			max_load_other_cpu = j_dbs_info->max_load;
		/*
		 * The other cpu could be running at a higher frequency
		 * but may not have completed its sampling_down_factor.
		 * In that case, consider the other cpu loaded so that a
		 * frequency imbalance does not occur.
		 */

		if ((j_dbs_info->cur_policy != NULL)
			&& (j_dbs_info->cur_policy->cur ==
				j_dbs_info->cur_policy->max)) {

			if (policy->cur >= dbs_tuners_ins.optimal_freq)
				max_load_other_cpu =
				dbs_tuners_ins.up_threshold_any_cpu_load;
		}
	}

	/* calculate the scaled load across CPU */
	load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;

	cpufreq_notify_utilization(policy, load_at_max_freq);
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	if (num_online_cpus() > 1) {

		if (max_load_other_cpu >
				dbs_tuners_ins.up_threshold_any_cpu_load) {
			if (policy->cur < dbs_tuners_ins.sync_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.sync_freq);
			return;
		}

		if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
								policy->cur) {
			if (policy->cur < dbs_tuners_ins.optimal_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.optimal_freq);
			return;
		}
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (num_online_cpus() > 1) {
			if (max_load_other_cpu >
				(dbs_tuners_ins.up_threshold_multi_core -
				 dbs_tuners_ins.down_differential) &&
				freq_next < dbs_tuners_ins.sync_freq)
				freq_next = dbs_tuners_ins.sync_freq;

			if (max_load_freq >
				((dbs_tuners_ins.up_threshold_multi_core -
				  dbs_tuners_ins.down_differential_multi_core) *
				  policy->cur) &&
				freq_next < dbs_tuners_ins.optimal_freq)
				freq_next = dbs_tuners_ins.optimal_freq;

		}
		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}

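/*
 * With powersave_bias set, each sampling interval is split in two: the
 * NORMAL sample below runs dbs_check_cpu() and stays at freq_hi for
 * freq_hi_jiffies, then a SUB_SAMPLE drops to freq_lo for freq_lo_jiffies,
 * so the average frequency roughly matches the biased target computed by
 * powersave_bias_target().
 */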
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

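/*
 * Input boost: dbs_input_event() below queues dbs_refresh_work on every
 * online CPU for any input event (unless powersave_bias is pinned at its
 * max/min level), and dbs_refresh_callback() then ramps that CPU straight
 * to policy->max so the system reacts quickly to user interaction.
 */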
static void dbs_refresh_callback(struct work_struct *work)
{
	struct cpufreq_policy *policy;
	struct cpu_dbs_info_s *this_dbs_info;
	struct dbs_work_struct *dbs_work;
	unsigned int cpu;

	dbs_work = container_of(work, struct dbs_work_struct, work);
	cpu = dbs_work->cpu;

	get_online_cpus();

	if (lock_policy_rwsem_write(cpu) < 0)
		goto bail_acq_sema_failed;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	policy = this_dbs_info->cur_policy;
	if (!policy) {
		/* CPU not using ondemand governor */
		goto bail_incorrect_governor;
	}

	if (policy->cur < policy->max) {
		/*
		 * Arch specific cpufreq driver may fail.
		 * Don't update governor frequency upon failure.
		 */
		if (__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_L) >= 0)
			policy->cur = policy->max;

		this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
				&this_dbs_info->prev_cpu_wall);
	}

bail_incorrect_governor:
	unlock_policy_rwsem_write(cpu);

bail_acq_sema_failed:
	put_online_cpus();
	return;
}

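/*
 * CPU sync on task migration (the migration_notifier_head chain itself is
 * provided elsewhere in this tree): dbs_migration_notify() queues
 * dbs_sync_work on the destination CPU, and dbs_synchronize() raises that
 * CPU to at least the source CPU's current frequency so a migrating task
 * does not land on a slow CPU.
 */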
static int dbs_migration_notify(struct notifier_block *nb,
				unsigned long target_cpu, void *arg)
{
	struct dbs_sync_work_struct *sync_work =
		&per_cpu(dbs_sync_work, target_cpu);
	sync_work->src_cpu = (unsigned int)arg;

	queue_work_on(target_cpu, dbs_wq,
		&per_cpu(dbs_sync_work, target_cpu).work);

	return NOTIFY_OK;
}

static struct notifier_block dbs_migration_nb = {
	.notifier_call = dbs_migration_notify,
};

void dbs_synchronize(struct work_struct *work)
{
	struct cpufreq_policy *policy;
	struct cpu_dbs_info_s *this_dbs_info, *src_dbs_info;
	struct dbs_sync_work_struct *dbs_work;
	unsigned int cpu, src_cpu;
	unsigned int src_freq, src_max_load;
	int delay;

	dbs_work = container_of(work, struct dbs_sync_work_struct, work);
	cpu = dbs_work->targ_cpu;
	src_cpu = dbs_work->src_cpu;

	get_online_cpus();

	/* Getting source cpu info */
	src_dbs_info = &per_cpu(od_cpu_dbs_info, src_cpu);
	if (src_dbs_info != NULL && src_dbs_info->cur_policy != NULL) {
		src_freq = src_dbs_info->cur_policy->cur;
		src_max_load = src_dbs_info->max_load;
	} else {
		src_freq = dbs_tuners_ins.sync_freq;
		src_max_load = 0;
	}

	if (lock_policy_rwsem_write(cpu) < 0)
		goto bail_acq_sema_failed;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	policy = this_dbs_info->cur_policy;
	if (!policy) {
		/* CPU not using ondemand governor */
		goto bail_incorrect_governor;
	}

	delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (policy->cur < src_freq) {

		/* Cancelling the next ondemand sample */
		cancel_delayed_work_sync(&this_dbs_info->work);

		/*
		 * Arch specific cpufreq driver may fail.
		 * Don't update governor frequency upon failure.
		 */
		if (__cpufreq_driver_target(policy, src_freq,
					CPUFREQ_RELATION_L) >= 0) {
			policy->cur = src_freq;
			if (src_max_load > this_dbs_info->max_load) {
				this_dbs_info->max_load = src_max_load;
				this_dbs_info->prev_load = src_max_load;
			}
		}

		/* Rescheduling the next ondemand sample */
		mutex_lock(&this_dbs_info->timer_mutex);
		schedule_delayed_work_on(cpu, &this_dbs_info->work,
					 delay);
		mutex_unlock(&this_dbs_info->timer_mutex);
	}

bail_incorrect_governor:
	unlock_policy_rwsem_write(cpu);

bail_acq_sema_failed:
	put_online_cpus();
	return;
}

static void dbs_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	int i;

	if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) ||
		(dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) {
		/* nothing to do */
		return;
	}

	for_each_online_cpu(i)
		queue_work_on(i, dbs_wq, &per_cpu(dbs_refresh_work, i).work);
}

static int dbs_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void dbs_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id dbs_ids[] = {
	{ .driver_info = 1 },
	{ },
};

static struct input_handler dbs_input_handler = {
	.event		= dbs_input_event,
	.connect	= dbs_input_connect,
	.disconnect	= dbs_input_disconnect,
	.name		= "cpufreq_ond",
	.id_table	= dbs_ids,
};

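/*
 * Governor entry point, in outline: GOV_START sets up per-CPU state,
 * registers the sysfs group, input handler and migration notifier on first
 * use, then arms the sampling timer; GOV_STOP tears those down again;
 * GOV_LIMITS clamps the current frequency into the new min..max range.
 */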
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
1191 unsigned int event)
1192{
1193 unsigned int cpu = policy->cpu;
1194 struct cpu_dbs_info_s *this_dbs_info;
1195 unsigned int j;
Jeff Garzik914f7c32006-10-20 14:31:00 -07001196 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
Tejun Heo245b2e72009-06-24 15:13:48 +09001198 this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199
1200 switch (event) {
1201 case CPUFREQ_GOV_START:
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001202 if ((!cpu_online(cpu)) || (!policy->cur))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 return -EINVAL;
1204
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001205 mutex_lock(&dbs_mutex);
Jeff Garzik914f7c32006-10-20 14:31:00 -07001206
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001207 dbs_enable++;
Rusty Russell835481d2009-01-04 05:18:06 -08001208 for_each_cpu(j, policy->cpus) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 struct cpu_dbs_info_s *j_dbs_info;
Tejun Heo245b2e72009-06-24 15:13:48 +09001210 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 j_dbs_info->cur_policy = policy;
Dave Jones32ee8c32006-02-28 00:43:23 -05001212
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -07001213 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
1214 &j_dbs_info->prev_cpu_wall);
Glauber Costa3292beb2011-11-28 14:45:17 -02001215 if (dbs_tuners_ins.ignore_nice)
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -05001216 j_dbs_info->prev_cpu_nice =
Glauber Costa3292beb2011-11-28 14:45:17 -02001217 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 }
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -08001219 this_dbs_info->cpu = cpu;
David C Niemi3f78a9f2010-10-06 16:54:24 -04001220 this_dbs_info->rate_mult = 1;
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001221 ondemand_powersave_bias_init_cpu(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 /*
1223 * Start the timerschedule work, when this governor
1224 * is used for first time
1225 */
1226 if (dbs_enable == 1) {
1227 unsigned int latency;
Thomas Renninger0e625ac2009-07-24 15:25:06 +02001228
1229 rc = sysfs_create_group(cpufreq_global_kobject,
1230 &dbs_attr_group);
1231 if (rc) {
1232 mutex_unlock(&dbs_mutex);
1233 return rc;
1234 }
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 /* policy latency is in nS. Convert it to uS first */
Dave Jonesdf8b59b2005-09-20 12:39:35 -07001237 latency = policy->cpuinfo.transition_latency / 1000;
1238 if (latency == 0)
1239 latency = 1;
Thomas Renningercef96152009-04-22 13:48:29 +02001240 /* Bring kernel and HW constraints together */
1241 min_sampling_rate = max(min_sampling_rate,
1242 MIN_LATENCY_MULTIPLIER * latency);
1243 dbs_tuners_ins.sampling_rate =
1244 max(min_sampling_rate,
1245 latency * LATENCY_MULTIPLIER);
                        dbs_tuners_ins.io_is_busy = should_io_be_busy();

                        if (dbs_tuners_ins.optimal_freq == 0)
                                dbs_tuners_ins.optimal_freq = policy->min;

                        if (dbs_tuners_ins.sync_freq == 0)
                                dbs_tuners_ins.sync_freq = policy->min;

                        atomic_notifier_chain_register(&migration_notifier_head,
                                                       &dbs_migration_nb);
                }
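                /*
                 * The input handler is registered only once, from the policy
                 * that contains CPU 0; on input events it queues the per-CPU
                 * refresh work so the governor reacts quickly to user
                 * interaction.
                 */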
                if (!cpu)
                        rc = input_register_handler(&dbs_input_handler);
                mutex_unlock(&dbs_mutex);

                if (!ondemand_powersave_bias_setspeed(
                                        this_dbs_info->cur_policy,
                                        NULL,
                                        dbs_tuners_ins.powersave_bias))
                        dbs_timer_init(this_dbs_info);
                break;

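        /*
         * GOV_STOP: stop the sampling timer first, then drop the per-policy
         * bookkeeping under dbs_mutex; the global sysfs group and migration
         * notifier are removed only when the last policy stops.
         */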
        case CPUFREQ_GOV_STOP:
                dbs_timer_exit(this_dbs_info);

                mutex_lock(&dbs_mutex);
                dbs_enable--;
                /*
                 * If the device is being removed, the policy is no longer
                 * valid.
                 */
                this_dbs_info->cur_policy = NULL;
                if (!cpu)
                        input_unregister_handler(&dbs_input_handler);
                if (!dbs_enable) {
                        sysfs_remove_group(cpufreq_global_kobject,
                                           &dbs_attr_group);
                        atomic_notifier_chain_unregister(
                                &migration_notifier_head,
                                &dbs_migration_nb);
                }

                mutex_unlock(&dbs_mutex);

                break;

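        /*
         * GOV_LIMITS: the policy's min/max changed; clamp the current
         * frequency back into the new range, or re-apply the powersave
         * bias speed if one is set.
         */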
        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&this_dbs_info->timer_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
                else if (dbs_tuners_ins.powersave_bias != 0)
                        ondemand_powersave_bias_setspeed(
                                this_dbs_info->cur_policy,
                                policy,
                                dbs_tuners_ins.powersave_bias);
                mutex_unlock(&this_dbs_info->timer_mutex);
                break;
        }
        return 0;
}

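/*
 * Module init/exit. Once registered, the governor is selected per policy
 * via sysfs (standard cpufreq layout assumed), e.g.:
 *   echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 * and its global tunables appear under
 *   /sys/devices/system/cpu/cpufreq/ondemand/.
 */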
static int __init cpufreq_gov_dbs_init(void)
{
        u64 idle_time;
        unsigned int i;
        int cpu = get_cpu();

        idle_time = get_cpu_idle_time_us(cpu, NULL);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro-accounting is supported. Use finer thresholds. */
                dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                dbs_tuners_ins.down_differential =
                                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In the nohz/micro-accounting case the minimum sampling
                 * rate is fixed (and very low) rather than derived from HZ.
                 * The deferrable timer may skip samples while the CPU is
                 * idle or sleeping, as intended.
                 */
                min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                /* For correct statistics, we need 10 ticks for each measure */
                min_sampling_rate =
                        MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
        }
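        /*
         * Example: with HZ=100 the tick-based fallback is
         * 2 * jiffies_to_usecs(10) = 2 * 100000 = 200000 uS, while the
         * micro-accounting minimum is a fixed 10000 uS.
         */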

        dbs_wq = alloc_workqueue("ondemand_dbs_wq", WQ_HIGHPRI, 0);
        if (!dbs_wq) {
                printk(KERN_ERR "Failed to create ondemand_dbs_wq workqueue\n");
                return -ENOMEM;
        }
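        /*
         * Per-CPU state: a timer mutex for each dbs_info, a refresh work
         * item driven by the input handler, and a sync work item driven by
         * the task-migration notifier.
         */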
        for_each_possible_cpu(i) {
                struct cpu_dbs_info_s *this_dbs_info =
                        &per_cpu(od_cpu_dbs_info, i);
                struct dbs_work_struct *dbs_work =
                        &per_cpu(dbs_refresh_work, i);
                struct dbs_sync_work_struct *dbs_sync =
                        &per_cpu(dbs_sync_work, i);

                mutex_init(&this_dbs_info->timer_mutex);
                INIT_WORK(&dbs_work->work, dbs_refresh_callback);
                dbs_work->cpu = i;

                INIT_WORK(&dbs_sync->work, dbs_synchronize);
                dbs_sync->src_cpu = 0;
                dbs_sync->targ_cpu = i;
        }

        return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        unsigned int i;

        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
        for_each_possible_cpu(i) {
                struct cpu_dbs_info_s *this_dbs_info =
                        &per_cpu(od_cpu_dbs_info, i);
                mutex_destroy(&this_dbs_info->timer_mutex);
        }
        destroy_workqueue(dbs_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

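/*
 * When ondemand is the default governor it must be available before
 * cpufreq drivers probe during boot, so it is registered early with
 * fs_initcall(); otherwise ordinary module_init() is sufficient.
 */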
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);