Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * drivers/cpufreq/cpufreq_ondemand.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6 * Jun Nakajima <jun.nakajima@intel.com>
Rohit Guptac496cdd2013-04-04 15:45:16 -07007 * (c) 2013 The Linux Foundation. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/cpufreq.h>
Andrew Morton138a01282006-06-23 03:31:19 -070018#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/jiffies.h>
20#include <linux/kernel_stat.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080021#include <linux/mutex.h>
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -070022#include <linux/hrtimer.h>
23#include <linux/tick.h>
24#include <linux/ktime.h>
Steve Muckle538cfc12013-05-31 10:39:31 -070025#include <linux/kthread.h>
Thomas Renninger9411b4e2009-02-04 11:54:04 +010026#include <linux/sched.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <linux/input.h>
28#include <linux/workqueue.h>
29#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
31/*
 32 * dbs is used in this file as a short form for demand-based switching.
 33 * It helps to keep variable names smaller and simpler.
34 */
35
venkatesh.pallipadi@intel.come9d95bf2008-08-04 11:59:10 -070036#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#define DEF_FREQUENCY_UP_THRESHOLD (80)
David C Niemi3f78a9f2010-10-06 16:54:24 -040038#define DEF_SAMPLING_DOWN_FACTOR (1)
39#define MAX_SAMPLING_DOWN_FACTOR (100000)
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -070040#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
41#define MICRO_FREQUENCY_UP_THRESHOLD (95)
Thomas Renningercef96152009-04-22 13:48:29 +020042#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
Dave Jonesc29f1402005-05-31 19:03:50 -070043#define MIN_FREQUENCY_UP_THRESHOLD (11)
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#define MAX_FREQUENCY_UP_THRESHOLD (100)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1)
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Dave Jones32ee8c32006-02-28 00:43:23 -050047/*
48 * The polling frequency of this governor depends on the capability of
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 * the processor. Default polling frequency is 1000 times the transition
Dave Jones32ee8c32006-02-28 00:43:23 -050050 * latency of the processor. The governor will work on any processor with
 51 * transition latency <= 10 ms, using an appropriate sampling
Linus Torvalds1da177e2005-04-16 15:20:36 -070052 * rate.
 53 * For CPUs with transition latency > 10 ms (mostly drivers with CPUFREQ_ETERNAL)
 54 * this governor will not work.
 55 * All times here are in us.
56 */
Dave Jonesdf8b59b2005-09-20 12:39:35 -070057#define MIN_SAMPLING_RATE_RATIO (2)
Thomas Renninger112124a2009-02-04 11:55:12 +010058
Thomas Renningercef96152009-04-22 13:48:29 +020059static unsigned int min_sampling_rate;
60
Thomas Renninger112124a2009-02-04 11:55:12 +010061#define LATENCY_MULTIPLIER (1000)
Thomas Renningercef96152009-04-22 13:48:29 +020062#define MIN_LATENCY_MULTIPLIER (100)
Thomas Renninger1c256242007-10-02 13:28:12 -070063#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
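/*
 * A rough worked example of how these constants combine, assuming the usual
 * ondemand start-up path (only partially visible in this excerpt): a driver
 * reporting a transition latency of 10,000 ns gives latency = 10 us, so the
 * default sampling_rate becomes 10 * LATENCY_MULTIPLIER = 10,000 us (10 ms),
 * and the sysfs minimum is the larger of min_sampling_rate and
 * 10 * MIN_LATENCY_MULTIPLIER = 1,000 us.
 */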
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
David Ng8192a2f2012-01-19 14:16:19 -080065#define POWERSAVE_BIAS_MAXLEVEL (1000)
66#define POWERSAVE_BIAS_MINLEVEL (-1000)
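/*
 * Summary of how the extended powersave_bias range is handled further down:
 * a value strictly between these limits biases each requested frequency by
 * roughly value/1000 of the request (positive values bias downwards,
 * negative ones upwards), while writing POWERSAVE_BIAS_MAXLEVEL pins
 * policies to their minimum frequency and POWERSAVE_BIAS_MINLEVEL pins them
 * to their maximum, cancelling the periodic sampling timer in both bypass
 * cases.
 */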
67
David Howellsc4028952006-11-22 14:57:56 +000068static void do_dbs_timer(struct work_struct *work);
Thomas Renninger0e625ac2009-07-24 15:25:06 +020069static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
70 unsigned int event);
71
72#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
73static
74#endif
75struct cpufreq_governor cpufreq_gov_ondemand = {
76 .name = "ondemand",
77 .governor = cpufreq_governor_dbs,
78 .max_transition_latency = TRANSITION_LATENCY_LIMIT,
79 .owner = THIS_MODULE,
80};
David Howellsc4028952006-11-22 14:57:56 +000081
82/* Sampling types */
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -080083enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
Linus Torvalds1da177e2005-04-16 15:20:36 -070084
85struct cpu_dbs_info_s {
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -070086 cputime64_t prev_cpu_idle;
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -070087 cputime64_t prev_cpu_iowait;
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -070088 cputime64_t prev_cpu_wall;
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -070089 cputime64_t prev_cpu_nice;
Dave Jones32ee8c32006-02-28 00:43:23 -050090 struct cpufreq_policy *cur_policy;
Dave Jones2b03f892009-01-18 01:43:44 -050091 struct delayed_work work;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +040092 struct cpufreq_frequency_table *freq_table;
93 unsigned int freq_lo;
94 unsigned int freq_lo_jiffies;
95 unsigned int freq_hi_jiffies;
David C Niemi3f78a9f2010-10-06 16:54:24 -040096 unsigned int rate_mult;
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -070097 unsigned int prev_load;
98 unsigned int max_load;
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -080099 int cpu;
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700100 unsigned int sample_type:1;
101 /*
102 * percpu mutex that serializes governor limit change with
103 * do_dbs_timer invocation. We do not want do_dbs_timer to run
 104 * when the user is changing the governor or limits.
105 */
106 struct mutex timer_mutex;
Steve Muckle538cfc12013-05-31 10:39:31 -0700107
108 struct task_struct *sync_thread;
109 wait_queue_head_t sync_wq;
110 atomic_t src_sync_cpu;
111 atomic_t sync_enabled;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112};
Tejun Heo245b2e72009-06-24 15:13:48 +0900113static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114
David Ng8192a2f2012-01-19 14:16:19 -0800115static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info);
116static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info);
117
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118static unsigned int dbs_enable; /* number of CPUs using this policy */
119
Venkatesh Pallipadi4ec223d2006-06-21 15:18:34 -0700120/*
Matt Wagantall46aa0662013-05-31 20:02:01 -0700121 * dbs_mutex protects dbs_enable and dbs_info during start/stop.
Venkatesh Pallipadi4ec223d2006-06-21 15:18:34 -0700122 */
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -0700123static DEFINE_MUTEX(dbs_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124
Matt Wagantall2aa4f052013-05-23 15:52:49 -0700125static struct workqueue_struct *dbs_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126
Stephen Boydc8fc3012012-10-31 17:43:08 -0700127struct dbs_work_struct {
128 struct work_struct work;
129 unsigned int cpu;
130};
131
132static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400134static struct dbs_tuners {
Dave Jones32ee8c32006-02-28 00:43:23 -0500135 unsigned int sampling_rate;
Dave Jones32ee8c32006-02-28 00:43:23 -0500136 unsigned int up_threshold;
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700137 unsigned int up_threshold_multi_core;
venkatesh.pallipadi@intel.come9d95bf2008-08-04 11:59:10 -0700138 unsigned int down_differential;
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700139 unsigned int down_differential_multi_core;
140 unsigned int optimal_freq;
141 unsigned int up_threshold_any_cpu_load;
142 unsigned int sync_freq;
Dave Jones32ee8c32006-02-28 00:43:23 -0500143 unsigned int ignore_nice;
David C Niemi3f78a9f2010-10-06 16:54:24 -0400144 unsigned int sampling_down_factor;
David Ng8192a2f2012-01-19 14:16:19 -0800145 int powersave_bias;
Arjan van de Ven19379b12010-05-09 08:26:51 -0700146 unsigned int io_is_busy;
Dilip Gudlurf78bea22013-06-17 13:04:31 -0700147 unsigned int input_boost;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400148} dbs_tuners_ins = {
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700149 .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
Dave Jones32ee8c32006-02-28 00:43:23 -0500150 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
David C Niemi3f78a9f2010-10-06 16:54:24 -0400151 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
venkatesh.pallipadi@intel.come9d95bf2008-08-04 11:59:10 -0700152 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700153 .down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
154 .up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
Eric Piel9cbad612006-03-10 11:35:27 +0200155 .ignore_nice = 0,
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400156 .powersave_bias = 0,
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700157 .sync_freq = 0,
158 .optimal_freq = 0,
Dilip Gudlurf78bea22013-06-17 13:04:31 -0700159 .input_boost = 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160};
161
Glauber Costa3292beb2011-11-28 14:45:17 -0200162static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
Dave Jonesdac1c1a2005-05-31 19:03:49 -0700163{
Glauber Costa3292beb2011-11-28 14:45:17 -0200164 u64 idle_time;
Martin Schwidefsky612ef282011-12-19 19:23:15 +0100165 u64 cur_wall_time;
Glauber Costa3292beb2011-11-28 14:45:17 -0200166 u64 busy_time;
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700167
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700168 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700169
Martin Schwidefsky612ef282011-12-19 19:23:15 +0100170 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
171 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
Glauber Costa3292beb2011-11-28 14:45:17 -0200172 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
173 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
174 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
175 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700176
Martin Schwidefsky64861632011-12-15 14:56:09 +0100177 idle_time = cur_wall_time - busy_time;
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700178 if (wall)
Glauber Costa3292beb2011-11-28 14:45:17 -0200179 *wall = jiffies_to_usecs(cur_wall_time);
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700180
Glauber Costa3292beb2011-11-28 14:45:17 -0200181 return jiffies_to_usecs(idle_time);
Dave Jonesdac1c1a2005-05-31 19:03:49 -0700182}
183
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -0700184static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
185{
Michal Hocko6beea0c2011-08-24 09:37:48 +0200186 u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -0700187
188 if (idle_time == -1ULL)
189 return get_cpu_idle_time_jiffy(cpu, wall);
Michal Hocko6beea0c2011-08-24 09:37:48 +0200190 else
191 idle_time += get_cpu_iowait_time_us(cpu, wall);
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -0700192
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -0700193 return idle_time;
194}
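/*
 * Note on the fallback above: get_cpu_idle_time_us() returns -1ULL when
 * NO_HZ idle accounting is not available, in which case the coarser
 * jiffy-based bookkeeping in get_cpu_idle_time_jiffy() is used instead.
 * Otherwise iowait is folded into the reported idle time here and, when
 * io_is_busy is set, subtracted back out in dbs_check_cpu().
 */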
195
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -0700196static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
197{
198 u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
199
200 if (iowait_time == -1ULL)
201 return 0;
202
203 return iowait_time;
204}
205
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400206/*
 207 * Find the right freq to be set now with powersave_bias on.
208 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
209 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
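 *
 * A rough illustration with made-up numbers: powersave_bias = 100 (10%) and
 * a request for 1000 MHz give freq_avg = 900 MHz; if the nearest table
 * entries are 800 MHz and 1000 MHz, the CPU then spends about
 * (900 - 800) / (1000 - 800) = 50% of each sampling period at 1000 MHz and
 * the rest at 800 MHz, averaging out near the requested 900 MHz.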
210 */
Adrian Bunkb5ecf602006-08-13 23:00:08 +0200211static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
212 unsigned int freq_next,
213 unsigned int relation)
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400214{
David Ng8192a2f2012-01-19 14:16:19 -0800215 unsigned int freq_req, freq_avg;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400216 unsigned int freq_hi, freq_lo;
217 unsigned int index = 0;
218 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
David Ng8192a2f2012-01-19 14:16:19 -0800219 int freq_reduc;
Tejun Heo245b2e72009-06-24 15:13:48 +0900220 struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
221 policy->cpu);
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400222
223 if (!dbs_info->freq_table) {
224 dbs_info->freq_lo = 0;
225 dbs_info->freq_lo_jiffies = 0;
226 return freq_next;
227 }
228
229 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
230 relation, &index);
231 freq_req = dbs_info->freq_table[index].frequency;
232 freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
233 freq_avg = freq_req - freq_reduc;
234
235 /* Find freq bounds for freq_avg in freq_table */
236 index = 0;
237 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
238 CPUFREQ_RELATION_H, &index);
239 freq_lo = dbs_info->freq_table[index].frequency;
240 index = 0;
241 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
242 CPUFREQ_RELATION_L, &index);
243 freq_hi = dbs_info->freq_table[index].frequency;
244
245 /* Find out how long we have to be in hi and lo freqs */
246 if (freq_hi == freq_lo) {
247 dbs_info->freq_lo = 0;
248 dbs_info->freq_lo_jiffies = 0;
249 return freq_lo;
250 }
251 jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
252 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
253 jiffies_hi += ((freq_hi - freq_lo) / 2);
254 jiffies_hi /= (freq_hi - freq_lo);
255 jiffies_lo = jiffies_total - jiffies_hi;
256 dbs_info->freq_lo = freq_lo;
257 dbs_info->freq_lo_jiffies = jiffies_lo;
258 dbs_info->freq_hi_jiffies = jiffies_hi;
259 return freq_hi;
260}
261
David Ng8192a2f2012-01-19 14:16:19 -0800262static int ondemand_powersave_bias_setspeed(struct cpufreq_policy *policy,
263 struct cpufreq_policy *altpolicy,
264 int level)
265{
266 if (level == POWERSAVE_BIAS_MAXLEVEL) {
267 /* maximum powersave; set to lowest frequency */
268 __cpufreq_driver_target(policy,
269 (altpolicy) ? altpolicy->min : policy->min,
270 CPUFREQ_RELATION_L);
271 return 1;
272 } else if (level == POWERSAVE_BIAS_MINLEVEL) {
273 /* minimum powersave; set to highest frequency */
274 __cpufreq_driver_target(policy,
275 (altpolicy) ? altpolicy->max : policy->max,
276 CPUFREQ_RELATION_H);
277 return 1;
278 }
279 return 0;
280}
281
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700282static void ondemand_powersave_bias_init_cpu(int cpu)
283{
Tejun Heo384be2b2009-08-14 14:41:02 +0900284 struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700285 dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
286 dbs_info->freq_lo = 0;
287}
288
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400289static void ondemand_powersave_bias_init(void)
290{
291 int i;
292 for_each_online_cpu(i) {
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700293 ondemand_powersave_bias_init_cpu(i);
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400294 }
295}
296
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297/************************** sysfs interface ************************/
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200298
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200299static ssize_t show_sampling_rate_min(struct kobject *kobj,
300 struct attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301{
Thomas Renningercef96152009-04-22 13:48:29 +0200302 return sprintf(buf, "%u\n", min_sampling_rate);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200305define_one_global_ro(sampling_rate_min);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306
307/* cpufreq_ondemand Governor Tunables */
308#define show_one(file_name, object) \
309static ssize_t show_##file_name \
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200310(struct kobject *kobj, struct attribute *attr, char *buf) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311{ \
312 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
313}
314show_one(sampling_rate, sampling_rate);
Arjan van de Ven19379b12010-05-09 08:26:51 -0700315show_one(io_is_busy, io_is_busy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316show_one(up_threshold, up_threshold);
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700317show_one(up_threshold_multi_core, up_threshold_multi_core);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318show_one(down_differential, down_differential);
David C Niemi3f78a9f2010-10-06 16:54:24 -0400319show_one(sampling_down_factor, sampling_down_factor);
Alexander Clouter001893c2005-12-01 01:09:25 -0800320show_one(ignore_nice_load, ignore_nice);
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700321show_one(optimal_freq, optimal_freq);
322show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
323show_one(sync_freq, sync_freq);
Dilip Gudlurf78bea22013-06-17 13:04:31 -0700324show_one(input_boost, input_boost);
David Ng8192a2f2012-01-19 14:16:19 -0800325
326static ssize_t show_powersave_bias
327(struct kobject *kobj, struct attribute *attr, char *buf)
328{
329 return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
330}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331
MyungJoo Hamfd0ef7a2012-02-29 17:54:41 +0900332/**
333 * update_sampling_rate - update sampling rate effective immediately if needed.
334 * @new_rate: new sampling rate
335 *
 336 * If the new rate is smaller than the old one, simply updating
 337 * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
 338 * if the original sampling_rate was 1 second and the requested new sampling
 339 * rate is 10 ms because the user needs an immediate reaction from the
 340 * ondemand governor, but is not sure whether a higher frequency will be
 341 * required, the governor may change the sampling rate too late, up to
 342 * 1 second later. Thus, if we are reducing the sampling rate, we need to
 343 * make the new value effective immediately.
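 *
 * Illustrative usage (sysfs path assumed from the "ondemand" attribute
 * group defined below):
 *   echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 * With an old rate of 1,000,000 us, any per-CPU timer whose next firing is
 * more than 10 ms away is cancelled and re-queued with the shorter delay.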
344 */
345static void update_sampling_rate(unsigned int new_rate)
346{
347 int cpu;
348
349 dbs_tuners_ins.sampling_rate = new_rate
350 = max(new_rate, min_sampling_rate);
351
Rohit Guptacf181752013-07-31 15:33:24 -0700352 get_online_cpus();
MyungJoo Hamfd0ef7a2012-02-29 17:54:41 +0900353 for_each_online_cpu(cpu) {
354 struct cpufreq_policy *policy;
355 struct cpu_dbs_info_s *dbs_info;
356 unsigned long next_sampling, appointed_at;
357
358 policy = cpufreq_cpu_get(cpu);
359 if (!policy)
360 continue;
361 dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
362 cpufreq_cpu_put(policy);
363
364 mutex_lock(&dbs_info->timer_mutex);
365
366 if (!delayed_work_pending(&dbs_info->work)) {
367 mutex_unlock(&dbs_info->timer_mutex);
368 continue;
369 }
370
371 next_sampling = jiffies + usecs_to_jiffies(new_rate);
372 appointed_at = dbs_info->work.timer.expires;
373
374
375 if (time_before(next_sampling, appointed_at)) {
376
377 mutex_unlock(&dbs_info->timer_mutex);
378 cancel_delayed_work_sync(&dbs_info->work);
379 mutex_lock(&dbs_info->timer_mutex);
380
Matt Wagantall2aa4f052013-05-23 15:52:49 -0700381 queue_delayed_work_on(dbs_info->cpu, dbs_wq,
382 &dbs_info->work, usecs_to_jiffies(new_rate));
MyungJoo Hamfd0ef7a2012-02-29 17:54:41 +0900383
384 }
385 mutex_unlock(&dbs_info->timer_mutex);
386 }
Rohit Guptacf181752013-07-31 15:33:24 -0700387 put_online_cpus();
MyungJoo Hamfd0ef7a2012-02-29 17:54:41 +0900388}
389
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200390static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
391 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392{
393 unsigned int input;
394 int ret;
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -0700395 ret = sscanf(buf, "%u", &input);
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700396 if (ret != 1)
397 return -EINVAL;
MyungJoo Hamfd0ef7a2012-02-29 17:54:41 +0900398 update_sampling_rate(input);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399 return count;
400}
401
Dilip Gudlurf78bea22013-06-17 13:04:31 -0700402static ssize_t store_input_boost(struct kobject *a, struct attribute *b,
403 const char *buf, size_t count)
404{
405 unsigned int input;
406 int ret;
407 ret = sscanf(buf, "%u", &input);
408 if (ret != 1)
409 return -EINVAL;
410 dbs_tuners_ins.input_boost = input;
411 return count;
412}
413
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700414static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
415 const char *buf, size_t count)
416{
417 unsigned int input;
418 int ret;
419
420 ret = sscanf(buf, "%u", &input);
421 if (ret != 1)
422 return -EINVAL;
423 dbs_tuners_ins.sync_freq = input;
424 return count;
425}
426
Arjan van de Ven19379b12010-05-09 08:26:51 -0700427static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
428 const char *buf, size_t count)
429{
430 unsigned int input;
431 int ret;
432
433 ret = sscanf(buf, "%u", &input);
434 if (ret != 1)
435 return -EINVAL;
Arjan van de Ven19379b12010-05-09 08:26:51 -0700436 dbs_tuners_ins.io_is_busy = !!input;
Arjan van de Ven19379b12010-05-09 08:26:51 -0700437 return count;
438}
439
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700440static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
441 const char *buf, size_t count)
442{
443 unsigned int input;
444 int ret;
445
446 ret = sscanf(buf, "%u", &input);
447 if (ret != 1)
448 return -EINVAL;
449 dbs_tuners_ins.optimal_freq = input;
450 return count;
451}
452
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200453static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
454 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700455{
456 unsigned int input;
457 int ret;
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -0700458 ret = sscanf(buf, "%u", &input);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459
Dave Jones32ee8c32006-02-28 00:43:23 -0500460 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
Dave Jonesc29f1402005-05-31 19:03:50 -0700461 input < MIN_FREQUENCY_UP_THRESHOLD) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462 return -EINVAL;
463 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464 dbs_tuners_ins.up_threshold = input;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 return count;
466}
467
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700468static ssize_t store_up_threshold_multi_core(struct kobject *a,
469 struct attribute *b, const char *buf, size_t count)
470{
471 unsigned int input;
472 int ret;
473 ret = sscanf(buf, "%u", &input);
474
475 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
476 input < MIN_FREQUENCY_UP_THRESHOLD) {
477 return -EINVAL;
478 }
479 dbs_tuners_ins.up_threshold_multi_core = input;
480 return count;
481}
482
483static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
484 struct attribute *b, const char *buf, size_t count)
485{
486 unsigned int input;
487 int ret;
488 ret = sscanf(buf, "%u", &input);
489
490 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
491 input < MIN_FREQUENCY_UP_THRESHOLD) {
492 return -EINVAL;
493 }
494 dbs_tuners_ins.up_threshold_any_cpu_load = input;
495 return count;
496}
497
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
499 const char *buf, size_t count)
500{
501 unsigned int input;
502 int ret;
503 ret = sscanf(buf, "%u", &input);
504
505 if (ret != 1 || input >= dbs_tuners_ins.up_threshold ||
506 input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
507 return -EINVAL;
508 }
509
510 dbs_tuners_ins.down_differential = input;
511
512 return count;
513}
514
David C Niemi3f78a9f2010-10-06 16:54:24 -0400515static ssize_t store_sampling_down_factor(struct kobject *a,
516 struct attribute *b, const char *buf, size_t count)
517{
518 unsigned int input, j;
519 int ret;
520 ret = sscanf(buf, "%u", &input);
521
522 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
523 return -EINVAL;
David C Niemi3f78a9f2010-10-06 16:54:24 -0400524 dbs_tuners_ins.sampling_down_factor = input;
525
526 /* Reset down sampling multiplier in case it was active */
527 for_each_online_cpu(j) {
528 struct cpu_dbs_info_s *dbs_info;
529 dbs_info = &per_cpu(od_cpu_dbs_info, j);
530 dbs_info->rate_mult = 1;
531 }
David C Niemi3f78a9f2010-10-06 16:54:24 -0400532 return count;
533}
534
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200535static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
536 const char *buf, size_t count)
Dave Jones3d5ee9e2005-05-31 19:03:47 -0700537{
538 unsigned int input;
539 int ret;
540
541 unsigned int j;
Dave Jones32ee8c32006-02-28 00:43:23 -0500542
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -0700543 ret = sscanf(buf, "%u", &input);
Dave Jones2b03f892009-01-18 01:43:44 -0500544 if (ret != 1)
Dave Jones3d5ee9e2005-05-31 19:03:47 -0700545 return -EINVAL;
546
Dave Jones2b03f892009-01-18 01:43:44 -0500547 if (input > 1)
Dave Jones3d5ee9e2005-05-31 19:03:47 -0700548 input = 1;
Dave Jones32ee8c32006-02-28 00:43:23 -0500549
Dave Jones2b03f892009-01-18 01:43:44 -0500550 if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
Dave Jones3d5ee9e2005-05-31 19:03:47 -0700551 return count;
552 }
553 dbs_tuners_ins.ignore_nice = input;
554
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700555 /* we need to re-evaluate prev_cpu_idle */
Dave Jonesdac1c1a2005-05-31 19:03:49 -0700556 for_each_online_cpu(j) {
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700557 struct cpu_dbs_info_s *dbs_info;
Tejun Heo245b2e72009-06-24 15:13:48 +0900558 dbs_info = &per_cpu(od_cpu_dbs_info, j);
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700559 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
560 &dbs_info->prev_cpu_wall);
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -0500561 if (dbs_tuners_ins.ignore_nice)
Glauber Costa3292beb2011-11-28 14:45:17 -0200562 dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -0500563
Dave Jones3d5ee9e2005-05-31 19:03:47 -0700564 }
Dave Jones3d5ee9e2005-05-31 19:03:47 -0700565 return count;
566}
567
Thomas Renninger0e625ac2009-07-24 15:25:06 +0200568static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
569 const char *buf, size_t count)
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400570{
David Ng8192a2f2012-01-19 14:16:19 -0800571 int input = 0;
572 int bypass = 0;
Krishna Vankaebf80eb2012-04-19 13:11:20 +0530573 int ret, cpu, reenable_timer, j;
David Ng8192a2f2012-01-19 14:16:19 -0800574 struct cpu_dbs_info_s *dbs_info;
575
Krishna Vankaebf80eb2012-04-19 13:11:20 +0530576 struct cpumask cpus_timer_done;
577 cpumask_clear(&cpus_timer_done);
578
David Ng8192a2f2012-01-19 14:16:19 -0800579 ret = sscanf(buf, "%d", &input);
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400580
581 if (ret != 1)
582 return -EINVAL;
583
David Ng8192a2f2012-01-19 14:16:19 -0800584 if (input >= POWERSAVE_BIAS_MAXLEVEL) {
585 input = POWERSAVE_BIAS_MAXLEVEL;
586 bypass = 1;
587 } else if (input <= POWERSAVE_BIAS_MINLEVEL) {
588 input = POWERSAVE_BIAS_MINLEVEL;
589 bypass = 1;
590 }
591
592 if (input == dbs_tuners_ins.powersave_bias) {
593 /* no change */
594 return count;
595 }
596
597 reenable_timer = ((dbs_tuners_ins.powersave_bias ==
598 POWERSAVE_BIAS_MAXLEVEL) ||
599 (dbs_tuners_ins.powersave_bias ==
600 POWERSAVE_BIAS_MINLEVEL));
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400601
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400602 dbs_tuners_ins.powersave_bias = input;
Matt Wagantall46aa0662013-05-31 20:02:01 -0700603
Matt Wagantall0afbad12013-05-31 13:14:44 -0700604 get_online_cpus();
Matt Wagantall48d56722013-08-22 10:40:48 -0700605 mutex_lock(&dbs_mutex);
Matt Wagantall46aa0662013-05-31 20:02:01 -0700606
David Ng8192a2f2012-01-19 14:16:19 -0800607 if (!bypass) {
608 if (reenable_timer) {
609 /* reinstate dbs timer */
610 for_each_online_cpu(cpu) {
611 if (lock_policy_rwsem_write(cpu) < 0)
612 continue;
613
614 dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
Krishna Vankaebf80eb2012-04-19 13:11:20 +0530615
616 for_each_cpu(j, &cpus_timer_done) {
617 if (!dbs_info->cur_policy) {
618 pr_err("Dbs policy is NULL\n");
619 goto skip_this_cpu;
620 }
621 if (cpumask_test_cpu(j, dbs_info->
622 cur_policy->cpus))
623 goto skip_this_cpu;
624 }
625
626 cpumask_set_cpu(cpu, &cpus_timer_done);
David Ng8192a2f2012-01-19 14:16:19 -0800627 if (dbs_info->cur_policy) {
628 /* restart dbs timer */
629 dbs_timer_init(dbs_info);
Rohit Gupta01585132013-06-17 17:56:27 -0700630 /* Enable frequency synchronization
631 * of CPUs */
632 atomic_set(&dbs_info->sync_enabled, 1);
David Ng8192a2f2012-01-19 14:16:19 -0800633 }
Krishna Vankaebf80eb2012-04-19 13:11:20 +0530634skip_this_cpu:
David Ng8192a2f2012-01-19 14:16:19 -0800635 unlock_policy_rwsem_write(cpu);
636 }
637 }
638 ondemand_powersave_bias_init();
639 } else {
640 /* running at maximum or minimum frequencies; cancel
641 dbs timer as periodic load sampling is not necessary */
642 for_each_online_cpu(cpu) {
643 if (lock_policy_rwsem_write(cpu) < 0)
644 continue;
645
646 dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
Krishna Vankaebf80eb2012-04-19 13:11:20 +0530647
648 for_each_cpu(j, &cpus_timer_done) {
649 if (!dbs_info->cur_policy) {
650 pr_err("Dbs policy is NULL\n");
651 goto skip_this_cpu_bypass;
652 }
653 if (cpumask_test_cpu(j, dbs_info->
654 cur_policy->cpus))
655 goto skip_this_cpu_bypass;
656 }
657
658 cpumask_set_cpu(cpu, &cpus_timer_done);
659
David Ng8192a2f2012-01-19 14:16:19 -0800660 if (dbs_info->cur_policy) {
661 /* cpu using ondemand, cancel dbs timer */
David Ng8192a2f2012-01-19 14:16:19 -0800662 dbs_timer_exit(dbs_info);
Rohit Gupta01585132013-06-17 17:56:27 -0700663 /* Disable frequency synchronization of
664 * CPUs to avoid re-queueing of work from
665 * sync_thread */
666 atomic_set(&dbs_info->sync_enabled, 0);
David Ng8192a2f2012-01-19 14:16:19 -0800667
Rohit Gupta3476e4f2013-06-17 16:57:08 -0700668 mutex_lock(&dbs_info->timer_mutex);
David Ng8192a2f2012-01-19 14:16:19 -0800669 ondemand_powersave_bias_setspeed(
670 dbs_info->cur_policy,
671 NULL,
672 input);
David Ng8192a2f2012-01-19 14:16:19 -0800673 mutex_unlock(&dbs_info->timer_mutex);
Rohit Gupta3476e4f2013-06-17 16:57:08 -0700674
David Ng8192a2f2012-01-19 14:16:19 -0800675 }
Krishna Vankaebf80eb2012-04-19 13:11:20 +0530676skip_this_cpu_bypass:
David Ng8192a2f2012-01-19 14:16:19 -0800677 unlock_policy_rwsem_write(cpu);
678 }
679 }
Matt Wagantall46aa0662013-05-31 20:02:01 -0700680
Matt Wagantall46aa0662013-05-31 20:02:01 -0700681 mutex_unlock(&dbs_mutex);
Matt Wagantall48d56722013-08-22 10:40:48 -0700682 put_online_cpus();
David Ng8192a2f2012-01-19 14:16:19 -0800683
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400684 return count;
685}
686
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200687define_one_global_rw(sampling_rate);
Linus Torvalds07d77752010-05-18 08:49:13 -0700688define_one_global_rw(io_is_busy);
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200689define_one_global_rw(up_threshold);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700690define_one_global_rw(down_differential);
David C Niemi3f78a9f2010-10-06 16:54:24 -0400691define_one_global_rw(sampling_down_factor);
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200692define_one_global_rw(ignore_nice_load);
693define_one_global_rw(powersave_bias);
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700694define_one_global_rw(up_threshold_multi_core);
695define_one_global_rw(optimal_freq);
696define_one_global_rw(up_threshold_any_cpu_load);
697define_one_global_rw(sync_freq);
Dilip Gudlurf78bea22013-06-17 13:04:31 -0700698define_one_global_rw(input_boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699
Dave Jones2b03f892009-01-18 01:43:44 -0500700static struct attribute *dbs_attributes[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701 &sampling_rate_min.attr,
702 &sampling_rate.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 &up_threshold.attr,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704 &down_differential.attr,
David C Niemi3f78a9f2010-10-06 16:54:24 -0400705 &sampling_down_factor.attr,
Alexander Clouter001893c2005-12-01 01:09:25 -0800706 &ignore_nice_load.attr,
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400707 &powersave_bias.attr,
Arjan van de Ven19379b12010-05-09 08:26:51 -0700708 &io_is_busy.attr,
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700709 &up_threshold_multi_core.attr,
710 &optimal_freq.attr,
711 &up_threshold_any_cpu_load.attr,
712 &sync_freq.attr,
Dilip Gudlurf78bea22013-06-17 13:04:31 -0700713 &input_boost.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714 NULL
715};
716
717static struct attribute_group dbs_attr_group = {
718 .attrs = dbs_attributes,
719 .name = "ondemand",
720};
721
722/************************** sysfs end ************************/
723
Mike Chan00e299f2010-01-26 17:06:47 -0800724static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
725{
726 if (dbs_tuners_ins.powersave_bias)
727 freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
728 else if (p->cur == p->max)
729 return;
730
731 __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
732 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
733}
734
Venkatesh Pallipadi2f8a8352006-06-28 13:51:19 -0700735static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736{
Anitha Anandcbeef6a2012-03-05 18:10:52 -0800737 /* Extrapolated load of this CPU */
738 unsigned int load_at_max_freq = 0;
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700739 unsigned int max_load_freq;
Anitha Anandcbeef6a2012-03-05 18:10:52 -0800740 /* Current load across this CPU */
741 unsigned int cur_load = 0;
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700742 unsigned int max_load_other_cpu = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 struct cpufreq_policy *policy;
744 unsigned int j;
745
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400746 this_dbs_info->freq_lo = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 policy = this_dbs_info->cur_policy;
Venki Pallipadiea487612007-06-20 14:26:24 -0700748
Dave Jones32ee8c32006-02-28 00:43:23 -0500749 /*
Dave Jonesc29f1402005-05-31 19:03:50 -0700750 * Every sampling_rate, we check whether the current idle time is less
 751 * than 20% (default); if it is, we try to increase the frequency.
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700752 * Every sampling_rate, we also look for the lowest
Dave Jonesc29f1402005-05-31 19:03:50 -0700753 * frequency which can sustain the load while keeping idle time over
 754 * 30%. If such a frequency exists, we try to decrease to this frequency.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 *
Dave Jones32ee8c32006-02-28 00:43:23 -0500756 * Any frequency increase takes it to the maximum frequency.
757 * Frequency reduction happens at minimum steps of
 758 * 5% (default) of the current frequency.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 */
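	/*
	 * Worked example with the compile-time defaults above
	 * (up_threshold = 80, down_differential = 10; boards with idle
	 * micro-accounting may switch to the MICRO_* values instead): a CPU
	 * currently at 1000 MHz jumps to policy->max as soon as
	 * max_load_freq exceeds 80 * 1000, and is only considered for a
	 * decrease once max_load_freq falls below (80 - 10) * 1000, in which
	 * case the candidate is freq_next = max_load_freq / 70, rounded to a
	 * table frequency.
	 */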
760
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700761 /* Get Absolute Load - in terms of freq */
762 max_load_freq = 0;
763
Rusty Russell835481d2009-01-04 05:18:06 -0800764 for_each_cpu(j, policy->cpus) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 struct cpu_dbs_info_s *j_dbs_info;
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -0700766 cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
767 unsigned int idle_time, wall_time, iowait_time;
Anitha Anandcbeef6a2012-03-05 18:10:52 -0800768 unsigned int load_freq;
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700769 int freq_avg;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770
Tejun Heo245b2e72009-06-24 15:13:48 +0900771 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700772
773 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -0700774 cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700775
Martin Schwidefsky64861632011-12-15 14:56:09 +0100776 wall_time = (unsigned int)
777 (cur_wall_time - j_dbs_info->prev_cpu_wall);
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700778 j_dbs_info->prev_cpu_wall = cur_wall_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779
Martin Schwidefsky64861632011-12-15 14:56:09 +0100780 idle_time = (unsigned int)
781 (cur_idle_time - j_dbs_info->prev_cpu_idle);
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700782 j_dbs_info->prev_cpu_idle = cur_idle_time;
783
Martin Schwidefsky64861632011-12-15 14:56:09 +0100784 iowait_time = (unsigned int)
785 (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -0700786 j_dbs_info->prev_cpu_iowait = cur_iowait_time;
787
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -0500788 if (dbs_tuners_ins.ignore_nice) {
Glauber Costa3292beb2011-11-28 14:45:17 -0200789 u64 cur_nice;
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -0500790 unsigned long cur_nice_jiffies;
791
Glauber Costa3292beb2011-11-28 14:45:17 -0200792 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
793 j_dbs_info->prev_cpu_nice;
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -0500794 /*
795 * Assumption: nice time between sampling periods will
 796 * be less than 2^32 jiffies on 32-bit systems
797 */
798 cur_nice_jiffies = (unsigned long)
799 cputime64_to_jiffies64(cur_nice);
800
Glauber Costa3292beb2011-11-28 14:45:17 -0200801 j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -0500802 idle_time += jiffies_to_usecs(cur_nice_jiffies);
803 }
804
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -0700805 /*
806 * For the purpose of ondemand, waiting for disk IO is an
807 * indication that you're performance critical, and not that
808 * the system is actually idle. So subtract the iowait time
809 * from the cpu idle time.
810 */
811
Arjan van de Ven19379b12010-05-09 08:26:51 -0700812 if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
Arjan van de Ven6b8fcd92010-05-09 08:26:06 -0700813 idle_time -= iowait_time;
814
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -0700815 if (unlikely(!wall_time || wall_time < idle_time))
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700816 continue;
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700817
Anitha Anandcbeef6a2012-03-05 18:10:52 -0800818 cur_load = 100 * (wall_time - idle_time) / wall_time;
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700819 j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load);
820 j_dbs_info->prev_load = cur_load;
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700821 freq_avg = __cpufreq_driver_getavg(policy, j);
822 if (freq_avg <= 0)
823 freq_avg = policy->cur;
824
Anitha Anandcbeef6a2012-03-05 18:10:52 -0800825 load_freq = cur_load * freq_avg;
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700826 if (load_freq > max_load_freq)
827 max_load_freq = load_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828 }
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700829
830 for_each_online_cpu(j) {
831 struct cpu_dbs_info_s *j_dbs_info;
832 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
833
834 if (j == policy->cpu)
835 continue;
836
837 if (max_load_other_cpu < j_dbs_info->max_load)
838 max_load_other_cpu = j_dbs_info->max_load;
839 /*
840 * The other cpu could be running at higher frequency
 841 * but may not have completed its sampling_down_factor.
 842 * In that case, consider the other cpu loaded so that a
843 * frequency imbalance does not occur.
844 */
845
846 if ((j_dbs_info->cur_policy != NULL)
847 && (j_dbs_info->cur_policy->cur ==
848 j_dbs_info->cur_policy->max)) {
849
850 if (policy->cur >= dbs_tuners_ins.optimal_freq)
851 max_load_other_cpu =
852 dbs_tuners_ins.up_threshold_any_cpu_load;
853 }
854 }
855
Anitha Anandcbeef6a2012-03-05 18:10:52 -0800856 /* calculate the scaled load across CPU */
857 load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
858
859 cpufreq_notify_utilization(policy, load_at_max_freq);
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700860 /* Check for frequency increase */
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700861 if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
David C Niemi3f78a9f2010-10-06 16:54:24 -0400862 /* If switching to max speed, apply sampling_down_factor */
863 if (policy->cur < policy->max)
864 this_dbs_info->rate_mult =
865 dbs_tuners_ins.sampling_down_factor;
Mike Chan00e299f2010-01-26 17:06:47 -0800866 dbs_freq_increase(policy, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 return;
868 }
869
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700870 if (num_online_cpus() > 1) {
871
872 if (max_load_other_cpu >
873 dbs_tuners_ins.up_threshold_any_cpu_load) {
874 if (policy->cur < dbs_tuners_ins.sync_freq)
875 dbs_freq_increase(policy,
876 dbs_tuners_ins.sync_freq);
877 return;
878 }
879
880 if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
881 policy->cur) {
882 if (policy->cur < dbs_tuners_ins.optimal_freq)
883 dbs_freq_increase(policy,
884 dbs_tuners_ins.optimal_freq);
885 return;
886 }
887 }
888
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 /* Check for frequency decrease */
Dave Jonesc29f1402005-05-31 19:03:50 -0700890 /* if we cannot reduce the frequency anymore, break out early */
891 if (policy->cur == policy->min)
892 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893
Dave Jonesc29f1402005-05-31 19:03:50 -0700894 /*
895 * The optimal frequency is the frequency that is the lowest that
896 * can support the current CPU usage without triggering the up
897 * policy. To be safe, we focus 10 points under the threshold.
898 */
venkatesh.pallipadi@intel.come9d95bf2008-08-04 11:59:10 -0700899 if (max_load_freq <
900 (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
901 policy->cur) {
venkatesh.pallipadi@intel.comc43aa3b2008-08-04 11:59:08 -0700902 unsigned int freq_next;
venkatesh.pallipadi@intel.come9d95bf2008-08-04 11:59:10 -0700903 freq_next = max_load_freq /
904 (dbs_tuners_ins.up_threshold -
905 dbs_tuners_ins.down_differential);
Venkatesh Pallipadidfde5d62006-10-03 12:38:45 -0700906
David C Niemi3f78a9f2010-10-06 16:54:24 -0400907 /* No longer fully busy, reset rate_mult */
908 this_dbs_info->rate_mult = 1;
909
Nagananda.Chumbalkar@hp.com1dbf5882009-12-21 23:40:52 +0100910 if (freq_next < policy->min)
911 freq_next = policy->min;
912
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700913 if (num_online_cpus() > 1) {
914 if (max_load_other_cpu >
915 (dbs_tuners_ins.up_threshold_multi_core -
916 dbs_tuners_ins.down_differential) &&
917 freq_next < dbs_tuners_ins.sync_freq)
918 freq_next = dbs_tuners_ins.sync_freq;
919
920 if (max_load_freq >
Veena Sambasivan8aa24862013-05-14 12:36:48 -0700921 ((dbs_tuners_ins.up_threshold_multi_core -
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700922 dbs_tuners_ins.down_differential_multi_core) *
Veena Sambasivan8aa24862013-05-14 12:36:48 -0700923 policy->cur) &&
924 freq_next < dbs_tuners_ins.optimal_freq)
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -0700925 freq_next = dbs_tuners_ins.optimal_freq;
926
927 }
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400928 if (!dbs_tuners_ins.powersave_bias) {
929 __cpufreq_driver_target(policy, freq_next,
930 CPUFREQ_RELATION_L);
931 } else {
932 int freq = powersave_bias_target(policy, freq_next,
933 CPUFREQ_RELATION_L);
934 __cpufreq_driver_target(policy, freq,
935 CPUFREQ_RELATION_L);
936 }
Venkatesh Pallipadiccb2fe22006-06-28 13:49:52 -0700937 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938}
939
David Howellsc4028952006-11-22 14:57:56 +0000940static void do_dbs_timer(struct work_struct *work)
Dave Jones32ee8c32006-02-28 00:43:23 -0500941{
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -0800942 struct cpu_dbs_info_s *dbs_info =
943 container_of(work, struct cpu_dbs_info_s, work.work);
944 unsigned int cpu = dbs_info->cpu;
945 int sample_type = dbs_info->sample_type;
946
Vincent Guittot5cb2c3b2011-02-07 17:14:25 +0100947 int delay;
Jocelyn Falempea665df92010-03-11 14:01:11 -0800948
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700949 mutex_lock(&dbs_info->timer_mutex);
Venkatesh Pallipadi56463b72007-02-05 16:12:45 -0800950
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400951 /* Common NORMAL_SAMPLE setup */
David Howellsc4028952006-11-22 14:57:56 +0000952 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400953 if (!dbs_tuners_ins.powersave_bias ||
David Howellsc4028952006-11-22 14:57:56 +0000954 sample_type == DBS_NORMAL_SAMPLE) {
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400955 dbs_check_cpu(dbs_info);
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400956 if (dbs_info->freq_lo) {
957 /* Setup timer for SUB_SAMPLE */
David Howellsc4028952006-11-22 14:57:56 +0000958 dbs_info->sample_type = DBS_SUB_SAMPLE;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400959 delay = dbs_info->freq_hi_jiffies;
Vincent Guittot5cb2c3b2011-02-07 17:14:25 +0100960 } else {
961 /* We want all CPUs to do sampling nearly on
962 * same jiffy
963 */
964 delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
965 * dbs_info->rate_mult);
966
967 if (num_online_cpus() > 1)
968 delay -= jiffies % delay;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400969 }
970 } else {
971 __cpufreq_driver_target(dbs_info->cur_policy,
Dave Jones2b03f892009-01-18 01:43:44 -0500972 dbs_info->freq_lo, CPUFREQ_RELATION_H);
Vincent Guittot5cb2c3b2011-02-07 17:14:25 +0100973 delay = dbs_info->freq_lo_jiffies;
Alexey Starikovskiy05ca0352006-07-31 22:28:12 +0400974 }
Matt Wagantall2aa4f052013-05-23 15:52:49 -0700975 queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay);
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -0700976 mutex_unlock(&dbs_info->timer_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -0500977}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -0800979static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980{
Alexey Starikovskiy1ce28d62006-07-31 22:25:20 +0400981 /* We want all CPUs to do sampling nearly on same jiffy */
982 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
Jocelyn Falempea665df92010-03-11 14:01:11 -0800983
984 if (num_online_cpus() > 1)
985 delay -= jiffies % delay;
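	/*
	 * Illustration: with delay = 10 jiffies and jiffies % 10 == 3,
	 * waiting only 7 jiffies lands the first sample on a multiple of
	 * 10 jiffies, keeping the online CPUs' samples roughly aligned.
	 */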
Venkatesh Pallipadi2f8a8352006-06-28 13:51:19 -0700986
David Howellsc4028952006-11-22 14:57:56 +0000987 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
Venki Pallipadi28287032007-05-08 00:27:47 -0700988 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
Matt Wagantall2aa4f052013-05-23 15:52:49 -0700989 queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990}
991
Linus Torvalds2cd7cbd2006-07-23 12:05:00 -0700992static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993{
Mathieu Desnoyersb14893a2009-05-17 10:30:45 -0400994 cancel_delayed_work_sync(&dbs_info->work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995}
996
Arjan van de Ven19379b12010-05-09 08:26:51 -0700997/*
 998 * Not all CPUs want IO time to be accounted as busy; this depends on how
999 * efficient idling at a higher frequency/voltage is.
1000 * Pavel Machek says this is not so for various generations of AMD and old
1001 * Intel systems.
 1002 * Mike Chan (android.com) claims this is also not true for ARM.
 1003 * Because of this, whitelist specific known series of CPUs by default, and
1004 * leave all others up to the user.
1005 */
1006static int should_io_be_busy(void)
1007{
1008#if defined(CONFIG_X86)
1009 /*
 1010 * For Intel, Core 2 (model 15) and later have an efficient idle.
1011 */
1012 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
1013 boot_cpu_data.x86 == 6 &&
1014 boot_cpu_data.x86_model >= 15)
1015 return 1;
1016#endif
1017 return 0;
1018}
1019
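/*
 * Input-event boost: dbs_refresh_callback() is queued from dbs_input_event()
 * for every online CPU and raises the CPU to input_boost (or to policy->max
 * when input_boost is 0) so the system reacts quickly to user interaction.
 */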
Stephen Boydc8fc3012012-10-31 17:43:08 -07001020static void dbs_refresh_callback(struct work_struct *work)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001021{
1022 struct cpufreq_policy *policy;
1023 struct cpu_dbs_info_s *this_dbs_info;
Stephen Boydc8fc3012012-10-31 17:43:08 -07001024 struct dbs_work_struct *dbs_work;
1025 unsigned int cpu;
Dilip Gudlurf78bea22013-06-17 13:04:31 -07001026 unsigned int target_freq;
Stephen Boydc8fc3012012-10-31 17:43:08 -07001027
1028 dbs_work = container_of(work, struct dbs_work_struct, work);
1029 cpu = dbs_work->cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001030
Krishna Vankaa3e04d82012-06-08 11:35:43 +05301031 get_online_cpus();
1032
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001033 if (lock_policy_rwsem_write(cpu) < 0)
Krishna Vankaa3e04d82012-06-08 11:35:43 +05301034 goto bail_acq_sema_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001035
1036 this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
1037 policy = this_dbs_info->cur_policy;
David Ng4a0a0232011-08-03 14:04:43 -07001038 if (!policy) {
1039 /* CPU not using ondemand governor */
Krishna Vankaa3e04d82012-06-08 11:35:43 +05301040 goto bail_incorrect_governor;
David Ng4a0a0232011-08-03 14:04:43 -07001041 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001042
Dilip Gudlurf78bea22013-06-17 13:04:31 -07001043 if (dbs_tuners_ins.input_boost)
1044 target_freq = dbs_tuners_ins.input_boost;
1045 else
1046 target_freq = policy->max;
1047
1048 if (policy->cur < target_freq) {
Anji Jonnalaf8732322012-12-13 14:03:54 +05301049 /*
1050 * Arch specific cpufreq driver may fail.
1051 * Don't update governor frequency upon failure.
1052 */
Dilip Gudlurf78bea22013-06-17 13:04:31 -07001053 if (__cpufreq_driver_target(policy, target_freq,
Anji Jonnalaf8732322012-12-13 14:03:54 +05301054 CPUFREQ_RELATION_L) >= 0)
Dilip Gudlurf78bea22013-06-17 13:04:31 -07001055 policy->cur = target_freq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001056
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001057 this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
1058 &this_dbs_info->prev_cpu_wall);
1059 }
Krishna Vankaa3e04d82012-06-08 11:35:43 +05301060
1061bail_incorrect_governor:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001062 unlock_policy_rwsem_write(cpu);
Krishna Vankaa3e04d82012-06-08 11:35:43 +05301063
1064bail_acq_sema_failed:
1065 put_online_cpus();
1066 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001067}
1068
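/*
 * Frequency synchronization on task migration (an addition relative to the
 * mainline ondemand governor): the migration notifier records the source CPU
 * and wakes the per-CPU sync thread, which then raises the destination CPU
 * to at least the source CPU's current frequency (or sync_freq as a
 * fallback) so a migrated task does not resume on a slow core.
 */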
Rohit Guptac496cdd2013-04-04 15:45:16 -07001069static int dbs_migration_notify(struct notifier_block *nb,
1070 unsigned long target_cpu, void *arg)
1071{
Steve Muckle538cfc12013-05-31 10:39:31 -07001072 struct cpu_dbs_info_s *target_dbs_info =
1073 &per_cpu(od_cpu_dbs_info, target_cpu);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001074
Steve Muckle538cfc12013-05-31 10:39:31 -07001075 atomic_set(&target_dbs_info->src_sync_cpu, (int)arg);
1076 wake_up(&target_dbs_info->sync_wq);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001077
1078 return NOTIFY_OK;
1079}
1080
1081static struct notifier_block dbs_migration_nb = {
1082 .notifier_call = dbs_migration_notify,
1083};
1084
Steve Muckle538cfc12013-05-31 10:39:31 -07001085static int sync_pending(struct cpu_dbs_info_s *this_dbs_info)
Rohit Guptac496cdd2013-04-04 15:45:16 -07001086{
Steve Muckle538cfc12013-05-31 10:39:31 -07001087 return atomic_read(&this_dbs_info->src_sync_cpu) >= 0;
1088}
1089
1090static int dbs_sync_thread(void *data)
1091{
1092 int src_cpu, cpu = (int)data;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001093 unsigned int src_freq, src_max_load;
Steve Muckle538cfc12013-05-31 10:39:31 -07001094 struct cpu_dbs_info_s *this_dbs_info, *src_dbs_info;
1095 struct cpufreq_policy *policy;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001096 int delay;
1097
Rohit Guptac496cdd2013-04-04 15:45:16 -07001098 this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001099
Steve Muckle538cfc12013-05-31 10:39:31 -07001100 while (1) {
1101 wait_event(this_dbs_info->sync_wq,
1102 sync_pending(this_dbs_info) ||
1103 kthread_should_stop());
Rohit Guptac496cdd2013-04-04 15:45:16 -07001104
Steve Muckle538cfc12013-05-31 10:39:31 -07001105 if (kthread_should_stop())
1106 break;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001107
Steve Muckle538cfc12013-05-31 10:39:31 -07001108 get_online_cpus();
Rohit Guptac496cdd2013-04-04 15:45:16 -07001109
Steve Muckle538cfc12013-05-31 10:39:31 -07001110 src_cpu = atomic_read(&this_dbs_info->src_sync_cpu);
1111 src_dbs_info = &per_cpu(od_cpu_dbs_info, src_cpu);
1112 if (src_dbs_info != NULL &&
1113 src_dbs_info->cur_policy != NULL) {
1114 src_freq = src_dbs_info->cur_policy->cur;
1115 src_max_load = src_dbs_info->max_load;
1116 } else {
1117 src_freq = dbs_tuners_ins.sync_freq;
1118 src_max_load = 0;
1119 }
Rohit Guptac496cdd2013-04-04 15:45:16 -07001120
Steve Muckle538cfc12013-05-31 10:39:31 -07001121 if (lock_policy_rwsem_write(cpu) < 0)
1122 goto bail_acq_sema_failed;
1123
Rohit Gupta01585132013-06-17 17:56:27 -07001124 if (!atomic_read(&this_dbs_info->sync_enabled)) {
1125 atomic_set(&this_dbs_info->src_sync_cpu, -1);
1126 put_online_cpus();
1127 unlock_policy_rwsem_write(cpu);
1128 continue;
1129 }
1130
Steve Muckle538cfc12013-05-31 10:39:31 -07001131 policy = this_dbs_info->cur_policy;
1132 if (!policy) {
1133 /* CPU not using ondemand governor */
1134 goto bail_incorrect_governor;
1135 }
1136 delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
1137
1138
1139 if (policy->cur < src_freq) {
1140 /* cancel the next ondemand sample */
1141 cancel_delayed_work_sync(&this_dbs_info->work);
1142
1143 /*
1144 * Arch specific cpufreq driver may fail.
1145 * Don't update governor frequency upon failure.
1146 */
1147 if (__cpufreq_driver_target(policy, src_freq,
1148 CPUFREQ_RELATION_L) >= 0) {
1149 policy->cur = src_freq;
1150 if (src_max_load > this_dbs_info->max_load) {
1151 this_dbs_info->max_load = src_max_load;
1152 this_dbs_info->prev_load = src_max_load;
1153 }
1154 }
1155
1156 /* reschedule the next ondemand sample */
1157 mutex_lock(&this_dbs_info->timer_mutex);
1158 queue_delayed_work_on(cpu, dbs_wq,
1159 &this_dbs_info->work, delay);
1160 mutex_unlock(&this_dbs_info->timer_mutex);
1161 }
1162
1163bail_incorrect_governor:
1164 unlock_policy_rwsem_write(cpu);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001165bail_acq_sema_failed:
Steve Muckle538cfc12013-05-31 10:39:31 -07001166 put_online_cpus();
1167 atomic_set(&this_dbs_info->src_sync_cpu, -1);
1168 }
1169
1170 return 0;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001171}
1172
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001173static void dbs_input_event(struct input_handle *handle, unsigned int type,
1174 unsigned int code, int value)
1175{
Matt Wagantall2100f002012-10-19 15:26:48 -07001176 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001177
David Ng8192a2f2012-01-19 14:16:19 -08001178 if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) ||
1179 (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) {
1180 /* nothing to do */
1181 return;
1182 }
1183
Stephen Boydc8fc3012012-10-31 17:43:08 -07001184 for_each_online_cpu(i)
Matt Wagantall2aa4f052013-05-23 15:52:49 -07001185 queue_work_on(i, dbs_wq, &per_cpu(dbs_refresh_work, i).work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001186}
1187
1188static int dbs_input_connect(struct input_handler *handler,
1189 struct input_dev *dev, const struct input_device_id *id)
1190{
1191 struct input_handle *handle;
1192 int error;
1193
1194 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
1195 if (!handle)
1196 return -ENOMEM;
1197
1198 handle->dev = dev;
1199 handle->handler = handler;
1200 handle->name = "cpufreq";
1201
1202 error = input_register_handle(handle);
1203 if (error)
1204 goto err2;
1205
1206 error = input_open_device(handle);
1207 if (error)
1208 goto err1;
1209
1210 return 0;
1211err1:
1212 input_unregister_handle(handle);
1213err2:
1214 kfree(handle);
1215 return error;
1216}
1217
1218static void dbs_input_disconnect(struct input_handle *handle)
1219{
1220 input_close_device(handle);
1221 input_unregister_handle(handle);
1222 kfree(handle);
1223}
1224
1225static const struct input_device_id dbs_ids[] = {
Tingwei Zhangcb74f482013-07-03 16:28:24 +08001226 /* multi-touch touchscreen */
1227 {
1228 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
1229 INPUT_DEVICE_ID_MATCH_ABSBIT,
1230 .evbit = { BIT_MASK(EV_ABS) },
1231 .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
1232 BIT_MASK(ABS_MT_POSITION_X) |
1233 BIT_MASK(ABS_MT_POSITION_Y) },
1234 },
1235 /* touchpad */
1236 {
1237 .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
1238 INPUT_DEVICE_ID_MATCH_ABSBIT,
1239 .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
1240 .absbit = { [BIT_WORD(ABS_X)] =
1241 BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
1242 },
1243 /* Keypad */
1244 {
1245 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
1246 .evbit = { BIT_MASK(EV_KEY) },
1247 },
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001248 { },
1249};
1250
1251static struct input_handler dbs_input_handler = {
1252 .event = dbs_input_event,
1253 .connect = dbs_input_connect,
1254 .disconnect = dbs_input_disconnect,
1255 .name = "cpufreq_ond",
1256 .id_table = dbs_ids,
1257};
1258
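/*
 * Governor entry point, called by the cpufreq core:
 *   CPUFREQ_GOV_START  - set up per-CPU state, create the global sysfs
 *                        group on first use and start the sampling timer,
 *   CPUFREQ_GOV_STOP   - stop sampling and drop per-CPU/global state,
 *   CPUFREQ_GOV_LIMITS - clamp the current frequency to the new
 *                        policy->min/policy->max.
 */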
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
1260 unsigned int event)
1261{
1262 unsigned int cpu = policy->cpu;
1263 struct cpu_dbs_info_s *this_dbs_info;
1264 unsigned int j;
Jeff Garzik914f7c32006-10-20 14:31:00 -07001265 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266
Tejun Heo245b2e72009-06-24 15:13:48 +09001267 this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
1269 switch (event) {
1270 case CPUFREQ_GOV_START:
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001271 if ((!cpu_online(cpu)) || (!policy->cur))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 return -EINVAL;
1273
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001274 mutex_lock(&dbs_mutex);
Jeff Garzik914f7c32006-10-20 14:31:00 -07001275
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001276 dbs_enable++;
Rusty Russell835481d2009-01-04 05:18:06 -08001277 for_each_cpu(j, policy->cpus) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 struct cpu_dbs_info_s *j_dbs_info;
Tejun Heo245b2e72009-06-24 15:13:48 +09001279 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 j_dbs_info->cur_policy = policy;
Dave Jones32ee8c32006-02-28 00:43:23 -05001281
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -07001282 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
1283 &j_dbs_info->prev_cpu_wall);
Glauber Costa3292beb2011-11-28 14:45:17 -02001284 if (dbs_tuners_ins.ignore_nice)
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -05001285 j_dbs_info->prev_cpu_nice =
Glauber Costa3292beb2011-11-28 14:45:17 -02001286 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
Steve Muckle538cfc12013-05-31 10:39:31 -07001287 set_cpus_allowed(j_dbs_info->sync_thread,
1288 *cpumask_of(j));
Rohit Gupta01585132013-06-17 17:56:27 -07001289 if (!dbs_tuners_ins.powersave_bias)
1290 atomic_set(&j_dbs_info->sync_enabled, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 }
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -08001292 this_dbs_info->cpu = cpu;
David C Niemi3f78a9f2010-10-06 16:54:24 -04001293 this_dbs_info->rate_mult = 1;
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001294 ondemand_powersave_bias_init_cpu(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 /*
 1296 * Start the sampling timer work when this governor
 1297 * is used for the first time
1298 */
1299 if (dbs_enable == 1) {
1300 unsigned int latency;
Thomas Renninger0e625ac2009-07-24 15:25:06 +02001301
1302 rc = sysfs_create_group(cpufreq_global_kobject,
1303 &dbs_attr_group);
1304 if (rc) {
1305 mutex_unlock(&dbs_mutex);
1306 return rc;
1307 }
1308
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 /* policy latency is in ns; convert it to us first */
Dave Jonesdf8b59b2005-09-20 12:39:35 -07001310 latency = policy->cpuinfo.transition_latency / 1000;
1311 if (latency == 0)
1312 latency = 1;
Thomas Renningercef96152009-04-22 13:48:29 +02001313 /* Bring kernel and HW constraints together */
1314 min_sampling_rate = max(min_sampling_rate,
1315 MIN_LATENCY_MULTIPLIER * latency);
1316 dbs_tuners_ins.sampling_rate =
1317 max(min_sampling_rate,
1318 latency * LATENCY_MULTIPLIER);
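/*
 * Worked example: a driver reporting a transition latency of 10000 ns
 * gives latency = 10 us. With idle micro-accounting available
 * (min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE = 10000 us, see
 * cpufreq_gov_dbs_init() below), the default sampling rate becomes
 * max(10000, 10 * LATENCY_MULTIPLIER) = 10000 us, i.e. a 10 ms period,
 * roughly 1000x the transition latency and never below the
 * MIN_LATENCY_MULTIPLIER * latency floor enforced above.
 */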
Arjan van de Ven19379b12010-05-09 08:26:51 -07001319 dbs_tuners_ins.io_is_busy = should_io_be_busy();
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -07001320
1321 if (dbs_tuners_ins.optimal_freq == 0)
1322 dbs_tuners_ins.optimal_freq = policy->min;
1323
1324 if (dbs_tuners_ins.sync_freq == 0)
1325 dbs_tuners_ins.sync_freq = policy->min;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001326
1327 atomic_notifier_chain_register(&migration_notifier_head,
1328 &dbs_migration_nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 }
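/*
 * The input handler is system-wide, so register it only once, when the
 * governor starts on the policy that owns CPU 0; GOV_STOP mirrors this.
 */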
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001330 if (!cpu)
1331 rc = input_register_handler(&dbs_input_handler);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001332 mutex_unlock(&dbs_mutex);
venkatesh.pallipadi@intel.com7d26e2d2009-07-02 17:08:30 -07001333
David Ng8192a2f2012-01-19 14:16:19 -08001334
1335 if (!ondemand_powersave_bias_setspeed(
1336 this_dbs_info->cur_policy,
1337 NULL,
1338 dbs_tuners_ins.powersave_bias))
1339 dbs_timer_init(this_dbs_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 break;
1341
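/*
 * GOV_STOP: cancel the sampling timer, disable migration sync for every
 * CPU in the policy, drop the cached policy pointer, and release the
 * global sysfs group and migration notifier once the last policy using
 * ondemand goes away.
 */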
1342 case CPUFREQ_GOV_STOP:
Linus Torvalds2cd7cbd2006-07-23 12:05:00 -07001343 dbs_timer_exit(this_dbs_info);
venkatesh.pallipadi@intel.com7d26e2d2009-07-02 17:08:30 -07001344
1345 mutex_lock(&dbs_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 dbs_enable--;
Steve Muckle538cfc12013-05-31 10:39:31 -07001347
1348 for_each_cpu(j, policy->cpus) {
1349 struct cpu_dbs_info_s *j_dbs_info;
1350 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
1351 atomic_set(&j_dbs_info->sync_enabled, 0);
1352 }
1353
Anitha Anand3dd65092012-01-18 17:17:40 -08001354 /* If the device is being removed, the policy is no longer valid. */
1356 this_dbs_info->cur_policy = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001357 if (!cpu)
1358 input_unregister_handler(&dbs_input_handler);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001359 if (!dbs_enable) {
Thomas Renninger0e625ac2009-07-24 15:25:06 +02001360 sysfs_remove_group(cpufreq_global_kobject,
1361 &dbs_attr_group);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001362 atomic_notifier_chain_unregister(
1363 &migration_notifier_head,
1364 &dbs_migration_nb);
1365 }
1366
Venkat Devarasetty4edc7662013-01-30 18:08:51 +05301367 mutex_unlock(&dbs_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368
1369 break;
1370
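/*
 * GOV_LIMITS: the policy min/max changed; pull the current frequency
 * back inside the new bounds, or re-apply the powersave_bias setpoint
 * if one is configured.
 */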
1371 case CPUFREQ_GOV_LIMITS:
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001372 mutex_lock(&this_dbs_info->timer_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 if (policy->max < this_dbs_info->cur_policy->cur)
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001374 __cpufreq_driver_target(this_dbs_info->cur_policy,
Dave Jones2b03f892009-01-18 01:43:44 -05001375 policy->max, CPUFREQ_RELATION_H);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 else if (policy->min > this_dbs_info->cur_policy->cur)
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001377 __cpufreq_driver_target(this_dbs_info->cur_policy,
Dave Jones2b03f892009-01-18 01:43:44 -05001378 policy->min, CPUFREQ_RELATION_L);
David Ng8192a2f2012-01-19 14:16:19 -08001379 else if (dbs_tuners_ins.powersave_bias != 0)
1380 ondemand_powersave_bias_setspeed(
1381 this_dbs_info->cur_policy,
1382 policy,
1383 dbs_tuners_ins.powersave_bias);
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001384 mutex_unlock(&this_dbs_info->timer_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 break;
1386 }
1387 return 0;
1388}
1389
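/*
 * Module init: choose thresholds based on whether idle micro-accounting
 * (get_cpu_idle_time_us()) is available, create the high-priority
 * workqueue shared by the refresh and sampling work, initialise the
 * per-CPU mutexes and wait queues, and spawn one dbs_sync/%d thread per
 * possible CPU before registering the governor.
 */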
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390static int __init cpufreq_gov_dbs_init(void)
1391{
Andrea Righi4f6e6b92008-09-18 10:43:40 +00001392 u64 idle_time;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001393 unsigned int i;
Andrea Righi4f6e6b92008-09-18 10:43:40 +00001394 int cpu = get_cpu();
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -07001395
Kamalesh Babulal21f2e3c2011-12-09 16:18:42 +05301396 idle_time = get_cpu_idle_time_us(cpu, NULL);
Andrea Righi4f6e6b92008-09-18 10:43:40 +00001397 put_cpu();
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -07001398 if (idle_time != -1ULL) {
1399 /* Idle micro accounting is supported. Use finer thresholds */
1400 dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
1401 dbs_tuners_ins.down_differential =
1402 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
Thomas Renningercef96152009-04-22 13:48:29 +02001403 /*
Paul Bollebd74b322011-08-06 14:33:43 +02001404 * In the nohz/micro-accounting case the minimum sampling rate is
Thomas Renningercef96152009-04-22 13:48:29 +02001405 * fixed (very low) rather than derived from HZ. The deferrable
 1406 * timer may skip samples while the CPU is idle/sleeping, as intended.
1407 */
1408 min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
1409 } else {
1410 /* For correct statistics, we need 10 ticks for each measure */
1411 min_sampling_rate =
1412 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -07001413 }
Akinobu Mita888a7942008-07-14 12:00:45 +09001414
Matt Wagantall2aa4f052013-05-23 15:52:49 -07001415 dbs_wq = alloc_workqueue("ondemand_dbs_wq", WQ_HIGHPRI, 0);
1416 if (!dbs_wq) {
1417 printk(KERN_ERR "Failed to create ondemand_dbs_wq workqueue\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 return -ENOMEM;
1419 }
1420 for_each_possible_cpu(i) {
Praveen Chidambaram457a4452012-07-19 10:45:07 -06001421 struct cpu_dbs_info_s *this_dbs_info =
1422 &per_cpu(od_cpu_dbs_info, i);
Stephen Boydc8fc3012012-10-31 17:43:08 -07001423 struct dbs_work_struct *dbs_work =
1424 &per_cpu(dbs_refresh_work, i);
1425
Praveen Chidambaram457a4452012-07-19 10:45:07 -06001426 mutex_init(&this_dbs_info->timer_mutex);
Stephen Boydc8fc3012012-10-31 17:43:08 -07001427 INIT_WORK(&dbs_work->work, dbs_refresh_callback);
1428 dbs_work->cpu = i;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001429
Steve Muckle538cfc12013-05-31 10:39:31 -07001430 atomic_set(&this_dbs_info->src_sync_cpu, -1);
1431 init_waitqueue_head(&this_dbs_info->sync_wq);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001432
Steve Muckle538cfc12013-05-31 10:39:31 -07001433 this_dbs_info->sync_thread = kthread_run(dbs_sync_thread,
1434 (void *)i,
1435 "dbs_sync/%d", i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001436 }
1437
Tejun Heo57df5572011-01-26 12:12:50 +01001438 return cpufreq_register_governor(&cpufreq_gov_ondemand);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439}
1440
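/*
 * Module exit: unregister the governor first so no new policies attach,
 * then stop the per-CPU sync threads and destroy the shared workqueue.
 */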
1441static void __exit cpufreq_gov_dbs_exit(void)
1442{
Anji Jonnala4c1485f2012-11-14 13:34:54 +05301443 unsigned int i;
1444
Thomas Renninger1c256242007-10-02 13:28:12 -07001445 cpufreq_unregister_governor(&cpufreq_gov_ondemand);
Anji Jonnala4c1485f2012-11-14 13:34:54 +05301446 for_each_possible_cpu(i) {
1447 struct cpu_dbs_info_s *this_dbs_info =
1448 &per_cpu(od_cpu_dbs_info, i);
1449 mutex_destroy(&this_dbs_info->timer_mutex);
Steve Muckle538cfc12013-05-31 10:39:31 -07001450 kthread_stop(this_dbs_info->sync_thread);
Anji Jonnala4c1485f2012-11-14 13:34:54 +05301451 }
Matt Wagantall2aa4f052013-05-23 15:52:49 -07001452 destroy_workqueue(dbs_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453}
1454
1455
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001456MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
1457MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
1458MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
Dave Jones2b03f892009-01-18 01:43:44 -05001459 "Low Latency Frequency Transition capable processors");
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001460MODULE_LICENSE("GPL");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461
Johannes Weiner69157192008-01-17 15:21:08 -08001462#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
1463fs_initcall(cpufreq_gov_dbs_init);
1464#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465module_init(cpufreq_gov_dbs_init);
Johannes Weiner69157192008-01-17 15:21:08 -08001466#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467module_exit(cpufreq_gov_dbs_exit);