/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (c) 2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/*
 * dbs is used in this file as a shortform for demandbased switching
 * It helps to keep variable names smaller, simpler
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
#define MIN_FREQUENCY_DOWN_DIFFERENTIAL		(1)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10ms, using appropriate sampling
 * rate.
 * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
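
/*
 * Worked example (illustrative numbers): a driver reporting a transition
 * latency of 10,000 ns gives latency = 10 us once converted in
 * cpufreq_governor_dbs(), so the default sampling rate becomes
 * max(min_sampling_rate, 10 * LATENCY_MULTIPLIER) = 10,000 us (10 ms),
 * and min_sampling_rate itself is raised to at least
 * MIN_LATENCY_MULTIPLIER * latency = 1,000 us.
 */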

#define POWERSAVE_BIAS_MAXLEVEL			(1000)
#define POWERSAVE_BIAS_MINLEVEL			(-1000)

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	unsigned int prev_load;
	unsigned int max_load;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;

	struct task_struct *sync_thread;
	wait_queue_head_t sync_wq;
	atomic_t src_sync_cpu;
	atomic_t sync_enabled;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info);
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable and dbs_info during start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *dbs_wq;

struct dbs_work_struct {
	struct work_struct work;
	unsigned int cpu;
};

static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work);

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int up_threshold_multi_core;
	unsigned int down_differential;
	unsigned int down_differential_multi_core;
	unsigned int optimal_freq;
	unsigned int up_threshold_any_cpu_load;
	unsigned int sync_freq;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
	.up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
	.ignore_nice = 0,
	.powersave_bias = 0,
	.sync_freq = 0,
	.optimal_freq = 0,
};
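
/*
 * These defaults are exposed as the global ondemand tunables through the
 * sysfs group registered in cpufreq_governor_dbs() (typically under
 * /sys/devices/system/cpu/cpufreq/ondemand/). optimal_freq and sync_freq
 * are left at 0 here and are filled in with policy->min when the governor
 * is started for the first time.
 */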

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
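
/*
 * get_cpu_idle_time_us() returns -1ULL when the kernel is not keeping
 * NO_HZ idle statistics for this CPU, in which case idle time falls back
 * to the jiffy-granular cpustat sum above. In the non-fallback case iowait
 * is folded into the reported idle time here and is subtracted again in
 * dbs_check_cpu() when io_is_busy is set.
 */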

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	int freq_reduc;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
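
/*
 * Illustrative example (made-up table values): with powersave_bias = 100
 * (i.e. 10%) and a requested 1,000,000 kHz, freq_avg = 900,000 kHz. If the
 * neighbouring table entries are freq_lo = 800,000 and freq_hi = 1,000,000,
 * then jiffies_hi ~= jiffies_total * (900,000 - 800,000) / (1,000,000 -
 * 800,000), i.e. half the sample window is spent at freq_hi and half at
 * freq_lo, averaging out close to the biased target frequency.
 */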

static int ondemand_powersave_bias_setspeed(struct cpufreq_policy *policy,
					    struct cpufreq_policy *altpolicy,
					    int level)
{
	if (level == POWERSAVE_BIAS_MAXLEVEL) {
		/* maximum powersave; set to lowest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->min : policy->min,
			CPUFREQ_RELATION_L);
		return 1;
	} else if (level == POWERSAVE_BIAS_MINLEVEL) {
		/* minimum powersave; set to highest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->max : policy->max,
			CPUFREQ_RELATION_H);
		return 1;
	}
	return 0;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(up_threshold_multi_core, up_threshold_multi_core);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(optimal_freq, optimal_freq);
show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
show_one(sync_freq, sync_freq);

static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
 * if the original sampling_rate was 1 second and the requested new sampling
 * rate is 10 ms because the user needs immediate reaction from ondemand
 * governor, but not sure if higher frequency will be required or not,
 * then, the governor may change the sampling rate too late; up to 1 second
 * later. Thus, if we are reducing the sampling rate, we need to make the
 * new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				= max(new_rate, min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			queue_delayed_work_on(dbs_info->cpu, dbs_wq,
				&dbs_info->work, usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
}
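
/*
 * Typical usage, assuming the usual global cpufreq sysfs location:
 *   echo 20000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 * A smaller rate takes effect immediately: any sample already queued
 * further away than the new period is cancelled and re-armed above rather
 * than being left to expire on the old schedule.
 */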

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}

static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.sync_freq = input;
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.optimal_freq = input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_up_threshold_multi_core(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_multi_core = input;
	return count;
}

static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_any_cpu_load = input;
	return count;
}

static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold ||
			input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
		return -EINVAL;
	}

	dbs_tuners_ins.down_differential = input;

	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	int input = 0;
	int bypass = 0;
	int ret, cpu, reenable_timer, j;
	struct cpu_dbs_info_s *dbs_info;

	struct cpumask cpus_timer_done;
	cpumask_clear(&cpus_timer_done);

	ret = sscanf(buf, "%d", &input);

	if (ret != 1)
		return -EINVAL;

	if (input >= POWERSAVE_BIAS_MAXLEVEL) {
		input = POWERSAVE_BIAS_MAXLEVEL;
		bypass = 1;
	} else if (input <= POWERSAVE_BIAS_MINLEVEL) {
		input = POWERSAVE_BIAS_MINLEVEL;
		bypass = 1;
	}

	if (input == dbs_tuners_ins.powersave_bias) {
		/* no change */
		return count;
	}

	reenable_timer = ((dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MAXLEVEL) ||
				(dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MINLEVEL));

	dbs_tuners_ins.powersave_bias = input;

	mutex_lock(&dbs_mutex);
	get_online_cpus();

	if (!bypass) {
		if (reenable_timer) {
			/* reinstate dbs timer */
			for_each_online_cpu(cpu) {
				if (lock_policy_rwsem_write(cpu) < 0)
					continue;

				dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

				for_each_cpu(j, &cpus_timer_done) {
					if (!dbs_info->cur_policy) {
						pr_err("Dbs policy is NULL\n");
						goto skip_this_cpu;
					}
					if (cpumask_test_cpu(j, dbs_info->
							cur_policy->cpus))
						goto skip_this_cpu;
				}

				cpumask_set_cpu(cpu, &cpus_timer_done);
				if (dbs_info->cur_policy) {
					/* restart dbs timer */
					dbs_timer_init(dbs_info);
					/* Enable frequency synchronization
					 * of CPUs */
					atomic_set(&dbs_info->sync_enabled, 1);
				}
skip_this_cpu:
				unlock_policy_rwsem_write(cpu);
			}
		}
		ondemand_powersave_bias_init();
	} else {
		/* running at maximum or minimum frequencies; cancel
		   dbs timer as periodic load sampling is not necessary */
		for_each_online_cpu(cpu) {
			if (lock_policy_rwsem_write(cpu) < 0)
				continue;

			dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

			for_each_cpu(j, &cpus_timer_done) {
				if (!dbs_info->cur_policy) {
					pr_err("Dbs policy is NULL\n");
					goto skip_this_cpu_bypass;
				}
				if (cpumask_test_cpu(j, dbs_info->
						cur_policy->cpus))
					goto skip_this_cpu_bypass;
			}

			cpumask_set_cpu(cpu, &cpus_timer_done);

			if (dbs_info->cur_policy) {
				/* cpu using ondemand, cancel dbs timer */
				dbs_timer_exit(dbs_info);
				/* Disable frequency synchronization of
				 * CPUs to avoid re-queueing of work from
				 * sync_thread */
				atomic_set(&dbs_info->sync_enabled, 0);

				mutex_lock(&dbs_info->timer_mutex);
				ondemand_powersave_bias_setspeed(
					dbs_info->cur_policy,
					NULL,
					input);
				mutex_unlock(&dbs_info->timer_mutex);

			}
skip_this_cpu_bypass:
			unlock_policy_rwsem_write(cpu);
		}
	}

	put_online_cpus();
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_rw(up_threshold_multi_core);
define_one_global_rw(optimal_freq);
define_one_global_rw(up_threshold_any_cpu_load);
define_one_global_rw(sync_freq);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&down_differential.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	&up_threshold_multi_core.attr,
	&optimal_freq.attr,
	&up_threshold_any_cpu_load.attr,
	&sync_freq.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
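
/*
 * All of the attributes above are published in the global "ondemand"
 * group, e.g. (paths assume the usual global cpufreq location):
 *   cat /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *   echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *   echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
 * sampling_rate_min is read-only; the writable files accept the ranges
 * enforced by the store_*() handlers above.
 */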

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	/* Extrapolated load of this CPU */
	unsigned int load_at_max_freq = 0;
	unsigned int max_load_freq;
	/* Current load across this CPU */
	unsigned int cur_load = 0;
	unsigned int max_load_other_cpu = 0;
	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check, if current idle time is less
	 * than 20% (default), then we try to increase frequency
	 * Every sampling_rate, we look for the lowest
	 * frequency which can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		cur_load = 100 * (wall_time - idle_time) / wall_time;
		j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load);
		j_dbs_info->prev_load = cur_load;
		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = cur_load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		if (j == policy->cpu)
			continue;

		if (max_load_other_cpu < j_dbs_info->max_load)
			max_load_other_cpu = j_dbs_info->max_load;
		/*
		 * The other cpu could be running at a higher frequency
		 * but may not have completed its sampling_down_factor.
		 * For that case, consider the other cpu as loaded so that
		 * frequency imbalance does not occur.
		 */

		if ((j_dbs_info->cur_policy != NULL)
			&& (j_dbs_info->cur_policy->cur ==
				j_dbs_info->cur_policy->max)) {

			if (policy->cur >= dbs_tuners_ins.optimal_freq)
				max_load_other_cpu =
				dbs_tuners_ins.up_threshold_any_cpu_load;
		}
	}

	/* calculate the scaled load across CPU */
	load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;

	cpufreq_notify_utilization(policy, load_at_max_freq);
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	if (num_online_cpus() > 1) {

		if (max_load_other_cpu >
				dbs_tuners_ins.up_threshold_any_cpu_load) {
			if (policy->cur < dbs_tuners_ins.sync_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.sync_freq);
			return;
		}

		if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
								policy->cur) {
			if (policy->cur < dbs_tuners_ins.optimal_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.optimal_freq);
			return;
		}
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (num_online_cpus() > 1) {
			if (max_load_other_cpu >
			    (dbs_tuners_ins.up_threshold_multi_core -
			     dbs_tuners_ins.down_differential) &&
			     freq_next < dbs_tuners_ins.sync_freq)
				freq_next = dbs_tuners_ins.sync_freq;

			if (max_load_freq >
				((dbs_tuners_ins.up_threshold_multi_core -
				  dbs_tuners_ins.down_differential_multi_core) *
				  policy->cur) &&
				freq_next < dbs_tuners_ins.optimal_freq)
				freq_next = dbs_tuners_ins.optimal_freq;

		}
		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
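
/*
 * Illustrative numbers for the decisions above, using the defaults
 * (up_threshold = 80, down_differential = 10): at policy->cur = 1,000,000
 * kHz, any max_load_freq above 80 * 1,000,000 jumps straight to
 * policy->max, while a max_load_freq of, say, 35,000,000 proposes
 * freq_next = 35,000,000 / (80 - 10) = 500,000 kHz, i.e. the lowest
 * frequency expected to keep the load roughly ten points below
 * up_threshold.
 */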

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}
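
/*
 * With powersave_bias set, a sample period is split in two: the
 * DBS_NORMAL_SAMPLE pass above runs dbs_check_cpu() and stays at the
 * higher frequency for freq_hi_jiffies, then the DBS_SUB_SAMPLE pass
 * drops to freq_lo for freq_lo_jiffies, as computed by
 * powersave_bias_target().
 */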

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) says this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static void dbs_refresh_callback(struct work_struct *work)
{
	struct cpufreq_policy *policy;
	struct cpu_dbs_info_s *this_dbs_info;
	struct dbs_work_struct *dbs_work;
	unsigned int cpu;

	dbs_work = container_of(work, struct dbs_work_struct, work);
	cpu = dbs_work->cpu;

	get_online_cpus();

	if (lock_policy_rwsem_write(cpu) < 0)
		goto bail_acq_sema_failed;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	policy = this_dbs_info->cur_policy;
	if (!policy) {
		/* CPU not using ondemand governor */
		goto bail_incorrect_governor;
	}

	if (policy->cur < policy->max) {
		/*
		 * Arch specific cpufreq driver may fail.
		 * Don't update governor frequency upon failure.
		 */
		if (__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_L) >= 0)
			policy->cur = policy->max;

		this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
				&this_dbs_info->prev_cpu_wall);
	}

bail_incorrect_governor:
	unlock_policy_rwsem_write(cpu);

bail_acq_sema_failed:
	put_online_cpus();
	return;
}

static int dbs_migration_notify(struct notifier_block *nb,
				unsigned long target_cpu, void *arg)
{
	struct cpu_dbs_info_s *target_dbs_info =
		&per_cpu(od_cpu_dbs_info, target_cpu);

	atomic_set(&target_dbs_info->src_sync_cpu, (int)arg);
	wake_up(&target_dbs_info->sync_wq);

	return NOTIFY_OK;
}

static struct notifier_block dbs_migration_nb = {
	.notifier_call = dbs_migration_notify,
};
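
/*
 * Registered on migration_notifier_head when the governor first starts
 * (see cpufreq_governor_dbs() below): when a task migration is reported,
 * target_cpu is the destination and arg carries the source CPU. The
 * destination CPU's sync thread is woken so it can pull its frequency and
 * tracked load up to the source CPU's values instead of waiting for the
 * next sample period.
 */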

static int sync_pending(struct cpu_dbs_info_s *this_dbs_info)
{
	return atomic_read(&this_dbs_info->src_sync_cpu) >= 0;
}

static int dbs_sync_thread(void *data)
{
	int src_cpu, cpu = (int)data;
	unsigned int src_freq, src_max_load;
	struct cpu_dbs_info_s *this_dbs_info, *src_dbs_info;
	struct cpufreq_policy *policy;
	int delay;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	while (1) {
		wait_event(this_dbs_info->sync_wq,
			   sync_pending(this_dbs_info) ||
			   kthread_should_stop());

		if (kthread_should_stop())
			break;

		get_online_cpus();

		src_cpu = atomic_read(&this_dbs_info->src_sync_cpu);
		src_dbs_info = &per_cpu(od_cpu_dbs_info, src_cpu);
		if (src_dbs_info != NULL &&
		    src_dbs_info->cur_policy != NULL) {
			src_freq = src_dbs_info->cur_policy->cur;
			src_max_load = src_dbs_info->max_load;
		} else {
			src_freq = dbs_tuners_ins.sync_freq;
			src_max_load = 0;
		}

		if (lock_policy_rwsem_write(cpu) < 0)
			goto bail_acq_sema_failed;

		if (!atomic_read(&this_dbs_info->sync_enabled)) {
			atomic_set(&this_dbs_info->src_sync_cpu, -1);
			put_online_cpus();
			unlock_policy_rwsem_write(cpu);
			continue;
		}

		policy = this_dbs_info->cur_policy;
		if (!policy) {
			/* CPU not using ondemand governor */
			goto bail_incorrect_governor;
		}
		delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

		if (policy->cur < src_freq) {
			/* cancel the next ondemand sample */
			cancel_delayed_work_sync(&this_dbs_info->work);

			/*
			 * Arch specific cpufreq driver may fail.
			 * Don't update governor frequency upon failure.
			 */
			if (__cpufreq_driver_target(policy, src_freq,
						    CPUFREQ_RELATION_L) >= 0) {
				policy->cur = src_freq;
				if (src_max_load > this_dbs_info->max_load) {
					this_dbs_info->max_load = src_max_load;
					this_dbs_info->prev_load = src_max_load;
				}
			}

			/* reschedule the next ondemand sample */
			mutex_lock(&this_dbs_info->timer_mutex);
			queue_delayed_work_on(cpu, dbs_wq,
					      &this_dbs_info->work, delay);
			mutex_unlock(&this_dbs_info->timer_mutex);
		}

bail_incorrect_governor:
		unlock_policy_rwsem_write(cpu);
bail_acq_sema_failed:
		put_online_cpus();
		atomic_set(&this_dbs_info->src_sync_cpu, -1);
	}

	return 0;
}

static void dbs_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	int i;

	if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) ||
		(dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) {
		/* nothing to do */
		return;
	}

	for_each_online_cpu(i)
		queue_work_on(i, dbs_wq, &per_cpu(dbs_refresh_work, i).work);
}

static int dbs_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void dbs_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id dbs_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};

static struct input_handler dbs_input_handler = {
	.event		= dbs_input_event,
	.connect	= dbs_input_connect,
	.disconnect	= dbs_input_disconnect,
	.name		= "cpufreq_ond",
	.id_table	= dbs_ids,
};
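
/*
 * Input events from the devices matched above (multi-touch screens,
 * touchpads, keypads) queue dbs_refresh_work on every online CPU; the
 * callback ramps each CPU that is below its maximum straight to
 * policy->max so the UI reacts before the next regular sample. The boost
 * is skipped while powersave_bias pins the CPUs at the minimum or maximum
 * frequency.
 */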
1233
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
1235 unsigned int event)
1236{
1237 unsigned int cpu = policy->cpu;
1238 struct cpu_dbs_info_s *this_dbs_info;
1239 unsigned int j;
Jeff Garzik914f7c32006-10-20 14:31:00 -07001240 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
Tejun Heo245b2e72009-06-24 15:13:48 +09001242 this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243
1244 switch (event) {
1245 case CPUFREQ_GOV_START:
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001246 if ((!cpu_online(cpu)) || (!policy->cur))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 return -EINVAL;
1248
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001249 mutex_lock(&dbs_mutex);
Jeff Garzik914f7c32006-10-20 14:31:00 -07001250
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001251 dbs_enable++;
Rusty Russell835481d2009-01-04 05:18:06 -08001252 for_each_cpu(j, policy->cpus) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 struct cpu_dbs_info_s *j_dbs_info;
Tejun Heo245b2e72009-06-24 15:13:48 +09001254 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 j_dbs_info->cur_policy = policy;
Dave Jones32ee8c32006-02-28 00:43:23 -05001256
venkatesh.pallipadi@intel.com34305022008-08-04 11:59:09 -07001257 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
1258 &j_dbs_info->prev_cpu_wall);
Glauber Costa3292beb2011-11-28 14:45:17 -02001259 if (dbs_tuners_ins.ignore_nice)
Venkatesh Pallipadi1ca3abd2009-01-23 09:25:02 -05001260 j_dbs_info->prev_cpu_nice =
Glauber Costa3292beb2011-11-28 14:45:17 -02001261 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
Steve Muckle538cfc12013-05-31 10:39:31 -07001262 set_cpus_allowed(j_dbs_info->sync_thread,
1263 *cpumask_of(j));
Rohit Gupta01585132013-06-17 17:56:27 -07001264 if (!dbs_tuners_ins.powersave_bias)
1265 atomic_set(&j_dbs_info->sync_enabled, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 }
Venkatesh Pallipadi529af7a2007-02-05 16:12:44 -08001267 this_dbs_info->cpu = cpu;
David C Niemi3f78a9f2010-10-06 16:54:24 -04001268 this_dbs_info->rate_mult = 1;
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001269 ondemand_powersave_bias_init_cpu(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270		/*
 1271		 * Start the timer/schedule the work when this governor
 1272		 * is used for the first time
 1273		 */
1274 if (dbs_enable == 1) {
1275 unsigned int latency;
Thomas Renninger0e625ac2009-07-24 15:25:06 +02001276
1277 rc = sysfs_create_group(cpufreq_global_kobject,
1278 &dbs_attr_group);
1279 if (rc) {
1280 mutex_unlock(&dbs_mutex);
1281 return rc;
1282 }
1283
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 /* policy latency is in nS. Convert it to uS first */
Dave Jonesdf8b59b2005-09-20 12:39:35 -07001285 latency = policy->cpuinfo.transition_latency / 1000;
1286 if (latency == 0)
1287 latency = 1;
Thomas Renningercef96152009-04-22 13:48:29 +02001288 /* Bring kernel and HW constraints together */
1289 min_sampling_rate = max(min_sampling_rate,
1290 MIN_LATENCY_MULTIPLIER * latency);
1291 dbs_tuners_ins.sampling_rate =
1292 max(min_sampling_rate,
1293 latency * LATENCY_MULTIPLIER);
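			/*
			 * Worked example with hypothetical numbers: a CPU
			 * reporting a transition latency of 50000 nS gives
			 * latency = 50 uS, so sampling_rate becomes
			 * max(min_sampling_rate, 50 * 1000) uS -- i.e. about
			 * 50 ms, unless min_sampling_rate (raised above to at
			 * least 100 * 50 = 5000 uS) is higher.
			 */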
Arjan van de Ven19379b12010-05-09 08:26:51 -07001294 dbs_tuners_ins.io_is_busy = should_io_be_busy();
Narayanan Gopalakrishnand3ca7832012-10-19 17:24:53 -07001295
1296 if (dbs_tuners_ins.optimal_freq == 0)
1297 dbs_tuners_ins.optimal_freq = policy->min;
1298
1299 if (dbs_tuners_ins.sync_freq == 0)
1300 dbs_tuners_ins.sync_freq = policy->min;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001301
1302 atomic_notifier_chain_register(&migration_notifier_head,
1303 &dbs_migration_nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305 if (!cpu)
1306 rc = input_register_handler(&dbs_input_handler);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001307 mutex_unlock(&dbs_mutex);
venkatesh.pallipadi@intel.com7d26e2d2009-07-02 17:08:30 -07001308
David Ng8192a2f2012-01-19 14:16:19 -08001309
1310 if (!ondemand_powersave_bias_setspeed(
1311 this_dbs_info->cur_policy,
1312 NULL,
1313 dbs_tuners_ins.powersave_bias))
1314 dbs_timer_init(this_dbs_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 break;
1316
1317 case CPUFREQ_GOV_STOP:
Linus Torvalds2cd7cbd2006-07-23 12:05:00 -07001318 dbs_timer_exit(this_dbs_info);
venkatesh.pallipadi@intel.com7d26e2d2009-07-02 17:08:30 -07001319
1320 mutex_lock(&dbs_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 dbs_enable--;
Steve Muckle538cfc12013-05-31 10:39:31 -07001322
1323 for_each_cpu(j, policy->cpus) {
1324 struct cpu_dbs_info_s *j_dbs_info;
1325 j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
1326 atomic_set(&j_dbs_info->sync_enabled, 0);
1327 }
1328
Anitha Anand3dd65092012-01-18 17:17:40 -08001329		/* If the device is being removed, the policy is
 1330		 * no longer valid. */
1331 this_dbs_info->cur_policy = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001332 if (!cpu)
1333 input_unregister_handler(&dbs_input_handler);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001334 if (!dbs_enable) {
Thomas Renninger0e625ac2009-07-24 15:25:06 +02001335 sysfs_remove_group(cpufreq_global_kobject,
1336 &dbs_attr_group);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001337 atomic_notifier_chain_unregister(
1338 &migration_notifier_head,
1339 &dbs_migration_nb);
1340 }
1341
Venkat Devarasetty4edc7662013-01-30 18:08:51 +05301342 mutex_unlock(&dbs_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
1344 break;
1345
1346 case CPUFREQ_GOV_LIMITS:
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001347 mutex_lock(&this_dbs_info->timer_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 if (policy->max < this_dbs_info->cur_policy->cur)
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001349 __cpufreq_driver_target(this_dbs_info->cur_policy,
Dave Jones2b03f892009-01-18 01:43:44 -05001350 policy->max, CPUFREQ_RELATION_H);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 else if (policy->min > this_dbs_info->cur_policy->cur)
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001352 __cpufreq_driver_target(this_dbs_info->cur_policy,
Dave Jones2b03f892009-01-18 01:43:44 -05001353 policy->min, CPUFREQ_RELATION_L);
David Ng8192a2f2012-01-19 14:16:19 -08001354 else if (dbs_tuners_ins.powersave_bias != 0)
1355 ondemand_powersave_bias_setspeed(
1356 this_dbs_info->cur_policy,
1357 policy,
1358 dbs_tuners_ins.powersave_bias);
venkatesh.pallipadi@intel.com5a75c822009-07-02 17:08:32 -07001359 mutex_unlock(&this_dbs_info->timer_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 break;
1361 }
1362 return 0;
1363}
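/*
 * For reference, a sketch of how the callback above is wired into the
 * cpufreq core; the actual cpufreq_gov_ondemand definition lives earlier
 * in this file and may differ in detail:
 *
 *	struct cpufreq_governor cpufreq_gov_ondemand = {
 *		.name			= "ondemand",
 *		.governor		= cpufreq_governor_dbs,
 *		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
 *		.owner			= THIS_MODULE,
 *	};
 */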
1364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365static int __init cpufreq_gov_dbs_init(void)
1366{
Andrea Righi4f6e6b92008-09-18 10:43:40 +00001367 u64 idle_time;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368 unsigned int i;
Andrea Righi4f6e6b92008-09-18 10:43:40 +00001369 int cpu = get_cpu();
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -07001370
Kamalesh Babulal21f2e3c2011-12-09 16:18:42 +05301371 idle_time = get_cpu_idle_time_us(cpu, NULL);
Andrea Righi4f6e6b92008-09-18 10:43:40 +00001372 put_cpu();
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -07001373 if (idle_time != -1ULL) {
1374 /* Idle micro accounting is supported. Use finer thresholds */
1375 dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
1376 dbs_tuners_ins.down_differential =
1377 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
Thomas Renningercef96152009-04-22 13:48:29 +02001378 /*
Paul Bollebd74b322011-08-06 14:33:43 +02001379	 * In the nohz/micro accounting case we set the minimum sampling rate
Thomas Renningercef96152009-04-22 13:48:29 +02001380	 * to a fixed (very low) value instead of deriving it from HZ. The
 1381	 * deferrable timer may skip some samples while idle/sleeping, as intended.
1382 */
1383 min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
1384 } else {
1385 /* For correct statistics, we need 10 ticks for each measure */
1386 min_sampling_rate =
1387 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
venkatesh.pallipadi@intel.com80800912008-08-04 11:59:12 -07001388 }
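	/*
	 * Rough numbers: with idle micro accounting the floor is
	 * MICRO_FREQUENCY_MIN_SAMPLE_RATE = 10000 uS (10 ms). Without it,
	 * and assuming HZ=1000, jiffies_to_usecs(10) = 10000 uS, so the
	 * floor is 2 * 10000 = 20000 uS (20 ms); with HZ=100 it becomes
	 * 200000 uS (200 ms).
	 */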
Akinobu Mita888a7942008-07-14 12:00:45 +09001389
Matt Wagantall2aa4f052013-05-23 15:52:49 -07001390 dbs_wq = alloc_workqueue("ondemand_dbs_wq", WQ_HIGHPRI, 0);
1391 if (!dbs_wq) {
1392 printk(KERN_ERR "Failed to create ondemand_dbs_wq workqueue\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001393		return -ENOMEM;
1394 }
1395 for_each_possible_cpu(i) {
Praveen Chidambaram457a4452012-07-19 10:45:07 -06001396 struct cpu_dbs_info_s *this_dbs_info =
1397 &per_cpu(od_cpu_dbs_info, i);
Stephen Boydc8fc3012012-10-31 17:43:08 -07001398 struct dbs_work_struct *dbs_work =
1399 &per_cpu(dbs_refresh_work, i);
1400
Praveen Chidambaram457a4452012-07-19 10:45:07 -06001401 mutex_init(&this_dbs_info->timer_mutex);
Stephen Boydc8fc3012012-10-31 17:43:08 -07001402 INIT_WORK(&dbs_work->work, dbs_refresh_callback);
1403 dbs_work->cpu = i;
Rohit Guptac496cdd2013-04-04 15:45:16 -07001404
Steve Muckle538cfc12013-05-31 10:39:31 -07001405 atomic_set(&this_dbs_info->src_sync_cpu, -1);
1406 init_waitqueue_head(&this_dbs_info->sync_wq);
Rohit Guptac496cdd2013-04-04 15:45:16 -07001407
Steve Muckle538cfc12013-05-31 10:39:31 -07001408 this_dbs_info->sync_thread = kthread_run(dbs_sync_thread,
1409 (void *)i,
1410 "dbs_sync/%d", i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001411 }
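	/*
	 * Per-CPU setup done above: a timer mutex, the refresh work item
	 * queued by dbs_input_event(), and a "dbs_sync/N" kthread that
	 * sleeps on sync_wq and is used for the migration-driven frequency
	 * synchronization handled by dbs_sync_thread() earlier in this file.
	 */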
1412
Tejun Heo57df5572011-01-26 12:12:50 +01001413 return cpufreq_register_governor(&cpufreq_gov_ondemand);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414}
1415
1416static void __exit cpufreq_gov_dbs_exit(void)
1417{
Anji Jonnala4c1485f2012-11-14 13:34:54 +05301418 unsigned int i;
1419
Thomas Renninger1c256242007-10-02 13:28:12 -07001420 cpufreq_unregister_governor(&cpufreq_gov_ondemand);
Anji Jonnala4c1485f2012-11-14 13:34:54 +05301421 for_each_possible_cpu(i) {
1422 struct cpu_dbs_info_s *this_dbs_info =
1423 &per_cpu(od_cpu_dbs_info, i);
1424 mutex_destroy(&this_dbs_info->timer_mutex);
Steve Muckle538cfc12013-05-31 10:39:31 -07001425 kthread_stop(this_dbs_info->sync_thread);
Anji Jonnala4c1485f2012-11-14 13:34:54 +05301426 }
Matt Wagantall2aa4f052013-05-23 15:52:49 -07001427 destroy_workqueue(dbs_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428}
1429
1430
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001431MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
1432MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
1433MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
Dave Jones2b03f892009-01-18 01:43:44 -05001434 "Low Latency Frequency Transition capable processors");
Venkatesh Pallipadiffac80e2006-06-28 13:52:18 -07001435MODULE_LICENSE("GPL");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
Johannes Weiner69157192008-01-17 15:21:08 -08001437#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
1438fs_initcall(cpufreq_gov_dbs_init);
1439#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440module_init(cpufreq_gov_dbs_init);
Johannes Weiner69157192008-01-17 15:21:08 -08001441#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442module_exit(cpufreq_gov_dbs_exit);
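/*
 * Usage sketch (assuming the standard cpufreq sysfs layout and that this
 * governor is built in or loaded):
 *
 *	# echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *	# echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * The first command selects the governor for cpu0's policy; the second
 * tunes the global sampling_rate attribute exported through
 * sysfs_create_group(cpufreq_global_kobject, ...) above.
 */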