/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (c)  2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
#define MIN_FREQUENCY_DOWN_DIFFERENTIAL		(1)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in uS.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

#define POWERSAVE_BIAS_MAXLEVEL			(1000)
#define POWERSAVE_BIAS_MINLEVEL			(-1000)

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	unsigned int prev_load;
	unsigned int max_load;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;

	struct task_struct *sync_thread;
	wait_queue_head_t sync_wq;
	atomic_t src_sync_cpu;
	atomic_t sync_enabled;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info);
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable and dbs_info during start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *dbs_wq;

struct dbs_work_struct {
	struct work_struct work;
	unsigned int cpu;
};

static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work);

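/*
 * Governor tunables, exported through the "ondemand" sysfs group:
 *
 * sampling_rate	- interval between load evaluations, in uS
 * up_threshold		- busy percentage above which the governor jumps
 *			  straight to policy->max
 * down_differential	- hysteresis (percentage points) below up_threshold
 *			  used when searching for a lower frequency
 * sampling_down_factor	- sampling interval multiplier applied while the
 *			  CPU runs at policy->max
 * ignore_nice		- treat time spent in niced tasks as idle
 * io_is_busy		- treat iowait time as busy time
 * powersave_bias	- shifts the selected target below the ondemand
 *			  target in steps of 0.1%; the extreme values
 *			  (+/-1000) pin the policy to its lowest/highest
 *			  frequency and stop periodic sampling
 * up_threshold_multi_core, up_threshold_any_cpu_load, optimal_freq,
 * sync_freq		- cross-CPU heuristics used when more than one CPU
 *			  is online
 */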
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int up_threshold_multi_core;
	unsigned int down_differential;
	unsigned int down_differential_multi_core;
	unsigned int optimal_freq;
	unsigned int up_threshold_any_cpu_load;
	unsigned int sync_freq;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	int          powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
	.up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
	.ignore_nice = 0,
	.powersave_bias = 0,
	.sync_freq = 0,
	.optimal_freq = 0,
};

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

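/*
 * Prefer the tickless (NO_HZ) idle accounting; get_cpu_idle_time_us()
 * returns -1ULL when it is not available, in which case we fall back to
 * the jiffy-based statistics above.
 */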
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	int freq_reduc;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
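	/*
	 * Split one sampling window between freq_hi and freq_lo so that the
	 * time-weighted average equals freq_avg. For example, at HZ=1000
	 * with sampling_rate = 20000 uS (jiffies_total = 20), freq_lo =
	 * 800 MHz, freq_hi = 1000 MHz and freq_avg = 900 MHz:
	 * jiffies_hi = (900 - 800) * 20 / (1000 - 800) = 10, so ten jiffies
	 * are spent at each frequency.
	 */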
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static int ondemand_powersave_bias_setspeed(struct cpufreq_policy *policy,
					    struct cpufreq_policy *altpolicy,
					    int level)
{
	if (level == POWERSAVE_BIAS_MAXLEVEL) {
		/* maximum powersave; set to lowest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->min : policy->min,
			CPUFREQ_RELATION_L);
		return 1;
	} else if (level == POWERSAVE_BIAS_MINLEVEL) {
		/* minimum powersave; set to highest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->max : policy->max,
			CPUFREQ_RELATION_H);
		return 1;
	}
	return 0;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(up_threshold_multi_core, up_threshold_multi_core);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(optimal_freq, optimal_freq);
show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
show_one(sync_freq, sync_freq);

static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
 * if the original sampling_rate was 1 second and the requested new sampling
 * rate is 10 ms because the user needs an immediate reaction from the
 * ondemand governor, but is not sure whether a higher frequency will be
 * required, the governor may change the sampling rate too late - up to
 * 1 second later. Thus, if we are reducing the sampling rate, we need to
 * make the new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				     = max(new_rate, min_sampling_rate);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;


		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			queue_delayed_work_on(dbs_info->cpu, dbs_wq,
				&dbs_info->work, usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
	put_online_cpus();
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}

static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.sync_freq = input;
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.optimal_freq = input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_up_threshold_multi_core(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_multi_core = input;
	return count;
}

static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_any_cpu_load = input;
	return count;
}

static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold ||
			input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
		return -EINVAL;
	}

	dbs_tuners_ins.down_differential = input;

	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	int input = 0;
	int bypass = 0;
	int ret, cpu, reenable_timer, j;
	struct cpu_dbs_info_s *dbs_info;

	struct cpumask cpus_timer_done;
	cpumask_clear(&cpus_timer_done);

	ret = sscanf(buf, "%d", &input);

	if (ret != 1)
		return -EINVAL;

	if (input >= POWERSAVE_BIAS_MAXLEVEL) {
		input = POWERSAVE_BIAS_MAXLEVEL;
		bypass = 1;
	} else if (input <= POWERSAVE_BIAS_MINLEVEL) {
		input = POWERSAVE_BIAS_MINLEVEL;
		bypass = 1;
	}

	if (input == dbs_tuners_ins.powersave_bias) {
		/* no change */
		return count;
	}

	reenable_timer = ((dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MAXLEVEL) ||
				(dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MINLEVEL));

	dbs_tuners_ins.powersave_bias = input;

	mutex_lock(&dbs_mutex);
	get_online_cpus();

	if (!bypass) {
		if (reenable_timer) {
			/* reinstate dbs timer */
			for_each_online_cpu(cpu) {
				if (lock_policy_rwsem_write(cpu) < 0)
					continue;

				dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

				for_each_cpu(j, &cpus_timer_done) {
					if (!dbs_info->cur_policy) {
						pr_err("Dbs policy is NULL\n");
						goto skip_this_cpu;
					}
					if (cpumask_test_cpu(j, dbs_info->
							cur_policy->cpus))
						goto skip_this_cpu;
				}

				cpumask_set_cpu(cpu, &cpus_timer_done);
				if (dbs_info->cur_policy) {
					/* restart dbs timer */
					dbs_timer_init(dbs_info);
					/* Enable frequency synchronization
					 * of CPUs */
					atomic_set(&dbs_info->sync_enabled, 1);
				}
skip_this_cpu:
				unlock_policy_rwsem_write(cpu);
			}
		}
		ondemand_powersave_bias_init();
	} else {
		/* running at maximum or minimum frequencies; cancel
		   dbs timer as periodic load sampling is not necessary */
		for_each_online_cpu(cpu) {
			if (lock_policy_rwsem_write(cpu) < 0)
				continue;

			dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

			for_each_cpu(j, &cpus_timer_done) {
				if (!dbs_info->cur_policy) {
					pr_err("Dbs policy is NULL\n");
					goto skip_this_cpu_bypass;
				}
				if (cpumask_test_cpu(j, dbs_info->
						cur_policy->cpus))
					goto skip_this_cpu_bypass;
			}

			cpumask_set_cpu(cpu, &cpus_timer_done);

			if (dbs_info->cur_policy) {
				/* cpu using ondemand, cancel dbs timer */
				dbs_timer_exit(dbs_info);
				/* Disable frequency synchronization of
				 * CPUs to avoid re-queueing of work from
				 * sync_thread */
				atomic_set(&dbs_info->sync_enabled, 0);

				mutex_lock(&dbs_info->timer_mutex);
				ondemand_powersave_bias_setspeed(
					dbs_info->cur_policy,
					NULL,
					input);
				mutex_unlock(&dbs_info->timer_mutex);

			}
skip_this_cpu_bypass:
			unlock_policy_rwsem_write(cpu);
		}
	}

	put_online_cpus();
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_rw(up_threshold_multi_core);
define_one_global_rw(optimal_freq);
define_one_global_rw(up_threshold_any_cpu_load);
define_one_global_rw(sync_freq);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&down_differential.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	&up_threshold_multi_core.attr,
	&optimal_freq.attr,
	&up_threshold_any_cpu_load.attr,
	&sync_freq.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

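/*
 * Ramp @p up to @freq. When powersave_bias is set, the request is first
 * lowered by powersave_bias_target(); otherwise a CPU that is already at
 * policy->max is left alone.
 */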
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

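/*
 * Evaluate the load of every CPU in the policy and choose a target:
 * jump to policy->max once the frequency-weighted load crosses
 * up_threshold, apply the multi-core tunables (sync_freq/optimal_freq)
 * when other online CPUs are heavily loaded, and otherwise pick the
 * lowest frequency that keeps the load below
 * (up_threshold - down_differential).
 */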
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	/* Extrapolated load of this CPU */
	unsigned int load_at_max_freq = 0;
	unsigned int max_load_freq;
	/* Current load across this CPU */
	unsigned int cur_load = 0;
	unsigned int max_load_other_cpu = 0;
	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check whether the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens in minimum steps of
	 * 5% (default) of the current frequency.
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		cur_load = 100 * (wall_time - idle_time) / wall_time;
		j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load);
		j_dbs_info->prev_load = cur_load;
		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = cur_load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		if (j == policy->cpu)
			continue;

		if (max_load_other_cpu < j_dbs_info->max_load)
			max_load_other_cpu = j_dbs_info->max_load;
		/*
		 * The other cpu could be running at a higher frequency
		 * but may not have completed its sampling_down_factor.
		 * In that case, consider the other cpu loaded so that a
		 * frequency imbalance does not occur.
		 */

		if ((j_dbs_info->cur_policy != NULL)
			&& (j_dbs_info->cur_policy->cur ==
					j_dbs_info->cur_policy->max)) {

			if (policy->cur >= dbs_tuners_ins.optimal_freq)
				max_load_other_cpu =
				dbs_tuners_ins.up_threshold_any_cpu_load;
		}
	}

	/* calculate the scaled load across the CPU */
	load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;

	cpufreq_notify_utilization(policy, load_at_max_freq);
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	if (num_online_cpus() > 1) {

		if (max_load_other_cpu >
				dbs_tuners_ins.up_threshold_any_cpu_load) {
			if (policy->cur < dbs_tuners_ins.sync_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.sync_freq);
			return;
		}

		if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
								policy->cur) {
			if (policy->cur < dbs_tuners_ins.optimal_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.optimal_freq);
			return;
		}
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (num_online_cpus() > 1) {
			if (max_load_other_cpu >
			(dbs_tuners_ins.up_threshold_multi_core -
			dbs_tuners_ins.down_differential) &&
			freq_next < dbs_tuners_ins.sync_freq)
				freq_next = dbs_tuners_ins.sync_freq;

			if (max_load_freq >
				((dbs_tuners_ins.up_threshold_multi_core -
				dbs_tuners_ins.down_differential_multi_core) *
				policy->cur) &&
				freq_next < dbs_tuners_ins.optimal_freq)
				freq_next = dbs_tuners_ins.optimal_freq;

		}
		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}

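/*
 * Periodic sampling work. Normally this runs dbs_check_cpu() and re-arms
 * itself after sampling_rate * rate_mult. With powersave_bias in effect, a
 * DBS_NORMAL_SAMPLE that selected freq_hi is followed by a DBS_SUB_SAMPLE
 * interval at freq_lo, so that the time-averaged frequency approximates
 * the biased target.
 */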
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

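/*
 * Input-event boost, queued per CPU from dbs_input_event(): ramp the CPU
 * straight to policy->max so the system responds quickly to user activity,
 * and refresh the idle/wall timestamps used by the next load sample.
 */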
static void dbs_refresh_callback(struct work_struct *work)
{
	struct cpufreq_policy *policy;
	struct cpu_dbs_info_s *this_dbs_info;
	struct dbs_work_struct *dbs_work;
	unsigned int cpu;

	dbs_work = container_of(work, struct dbs_work_struct, work);
	cpu = dbs_work->cpu;

	get_online_cpus();

	if (lock_policy_rwsem_write(cpu) < 0)
		goto bail_acq_sema_failed;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	policy = this_dbs_info->cur_policy;
	if (!policy) {
		/* CPU not using ondemand governor */
		goto bail_incorrect_governor;
	}

	if (policy->cur < policy->max) {
		/*
		 * Arch specific cpufreq driver may fail.
		 * Don't update governor frequency upon failure.
		 */
		if (__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_L) >= 0)
			policy->cur = policy->max;

		this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
				&this_dbs_info->prev_cpu_wall);
	}

bail_incorrect_governor:
	unlock_policy_rwsem_write(cpu);

bail_acq_sema_failed:
	put_online_cpus();
	return;
}

static int dbs_migration_notify(struct notifier_block *nb,
				unsigned long target_cpu, void *arg)
{
	struct cpu_dbs_info_s *target_dbs_info =
		&per_cpu(od_cpu_dbs_info, target_cpu);

	atomic_set(&target_dbs_info->src_sync_cpu, (int)arg);
	wake_up(&target_dbs_info->sync_wq);

	return NOTIFY_OK;
}

static struct notifier_block dbs_migration_nb = {
	.notifier_call = dbs_migration_notify,
};

static int sync_pending(struct cpu_dbs_info_s *this_dbs_info)
{
	return atomic_read(&this_dbs_info->src_sync_cpu) >= 0;
}

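/*
 * Per-CPU kthread implementing frequency synchronization: when a migration
 * is reported through migration_notifier_head, the destination CPU is
 * ramped to at least the source CPU's current frequency (or sync_freq as a
 * fallback) so that a freshly migrated task does not start on a slow CPU.
 */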
static int dbs_sync_thread(void *data)
{
	int src_cpu, cpu = (int)data;
	unsigned int src_freq, src_max_load;
	struct cpu_dbs_info_s *this_dbs_info, *src_dbs_info;
	struct cpufreq_policy *policy;
	int delay;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	while (1) {
		wait_event(this_dbs_info->sync_wq,
			   sync_pending(this_dbs_info) ||
			   kthread_should_stop());

		if (kthread_should_stop())
			break;

		get_online_cpus();

		src_cpu = atomic_read(&this_dbs_info->src_sync_cpu);
		src_dbs_info = &per_cpu(od_cpu_dbs_info, src_cpu);
		if (src_dbs_info != NULL &&
		    src_dbs_info->cur_policy != NULL) {
			src_freq = src_dbs_info->cur_policy->cur;
			src_max_load = src_dbs_info->max_load;
		} else {
			src_freq = dbs_tuners_ins.sync_freq;
			src_max_load = 0;
		}

		if (lock_policy_rwsem_write(cpu) < 0)
			goto bail_acq_sema_failed;

		if (!atomic_read(&this_dbs_info->sync_enabled)) {
			atomic_set(&this_dbs_info->src_sync_cpu, -1);
			put_online_cpus();
			unlock_policy_rwsem_write(cpu);
			continue;
		}

		policy = this_dbs_info->cur_policy;
		if (!policy) {
			/* CPU not using ondemand governor */
			goto bail_incorrect_governor;
		}
		delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);


		if (policy->cur < src_freq) {
			/* cancel the next ondemand sample */
			cancel_delayed_work_sync(&this_dbs_info->work);

			/*
			 * Arch specific cpufreq driver may fail.
			 * Don't update governor frequency upon failure.
			 */
			if (__cpufreq_driver_target(policy, src_freq,
						    CPUFREQ_RELATION_L) >= 0) {
				policy->cur = src_freq;
				if (src_max_load > this_dbs_info->max_load) {
					this_dbs_info->max_load = src_max_load;
					this_dbs_info->prev_load = src_max_load;
				}
			}

			/* reschedule the next ondemand sample */
			mutex_lock(&this_dbs_info->timer_mutex);
			queue_delayed_work_on(cpu, dbs_wq,
					      &this_dbs_info->work, delay);
			mutex_unlock(&this_dbs_info->timer_mutex);
		}

bail_incorrect_governor:
		unlock_policy_rwsem_write(cpu);
bail_acq_sema_failed:
		put_online_cpus();
		atomic_set(&this_dbs_info->src_sync_cpu, -1);
	}

	return 0;
}

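/*
 * Input handler: any event from a matching touchscreen, touchpad or keypad
 * device queues the per-CPU refresh work, boosting every online CPU via
 * dbs_refresh_callback(). Skipped while powersave_bias pins the policy to
 * its minimum or maximum frequency.
 */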
static void dbs_input_event(struct input_handle *handle, unsigned int type,
			    unsigned int code, int value)
{
	int i;

	if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) ||
	    (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) {
		/* nothing to do */
		return;
	}

	for_each_online_cpu(i)
		queue_work_on(i, dbs_wq, &per_cpu(dbs_refresh_work, i).work);
}

static int dbs_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void dbs_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id dbs_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};

static struct input_handler dbs_input_handler = {
	.event		= dbs_input_event,
	.connect	= dbs_input_connect,
	.disconnect	= dbs_input_disconnect,
	.name		= "cpufreq_ond",
	.id_table	= dbs_ids,
};

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
				j_dbs_info->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			set_cpus_allowed(j_dbs_info->sync_thread,
					 *cpumask_of(j));
			if (!dbs_tuners_ins.powersave_bias)
				atomic_set(&j_dbs_info->sync_enabled, 1);
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
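			/*
			 * Worked example (values depend on the platform):
			 * a transition latency of 10000 nS gives latency =
			 * 10 uS, so the line above picks
			 * max(min_sampling_rate, 10 * 1000) uS; with the
			 * 10000 uS micro-accounting minimum this yields a
			 * 10 mS default sampling rate.
			 */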
			dbs_tuners_ins.io_is_busy = should_io_be_busy();

			if (dbs_tuners_ins.optimal_freq == 0)
				dbs_tuners_ins.optimal_freq = policy->min;

			if (dbs_tuners_ins.sync_freq == 0)
				dbs_tuners_ins.sync_freq = policy->min;

			atomic_notifier_chain_register(&migration_notifier_head,
						       &dbs_migration_nb);
		}
		if (!cpu)
			rc = input_register_handler(&dbs_input_handler);
		mutex_unlock(&dbs_mutex);

		if (!ondemand_powersave_bias_setspeed(
					this_dbs_info->cur_policy,
					NULL,
					dbs_tuners_ins.powersave_bias))
			dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		dbs_enable--;

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			atomic_set(&j_dbs_info->sync_enabled, 0);
		}

		/*
		 * If device is being removed, policy is no longer valid.
		 */
		this_dbs_info->cur_policy = NULL;
		if (!cpu)
			input_unregister_handler(&dbs_input_handler);
		if (!dbs_enable) {
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);
			atomic_notifier_chain_unregister(
				&migration_notifier_head,
				&dbs_migration_nb);
		}

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		else if (dbs_tuners_ins.powersave_bias != 0)
			ondemand_powersave_bias_setspeed(
				this_dbs_info->cur_policy,
				policy,
				dbs_tuners_ins.powersave_bias);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

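/*
 * Module init: choose thresholds and the minimum sampling rate based on
 * whether idle micro-accounting is available, create the high-priority
 * workqueue and the per-CPU refresh work and sync threads, then register
 * the governor with the cpufreq core.
 */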
static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	unsigned int i;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In the nohz/micro-accounting case the minimum sampling
		 * rate is fixed (very low) rather than derived from HZ.
		 * The deferrable timer may skip samples while the CPU is
		 * idle or sleeping, which is fine.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	dbs_wq = alloc_workqueue("ondemand_dbs_wq", WQ_HIGHPRI, 0);
	if (!dbs_wq) {
		printk(KERN_ERR "Failed to create ondemand_dbs_wq workqueue\n");
		return -EFAULT;
	}
	for_each_possible_cpu(i) {
		struct cpu_dbs_info_s *this_dbs_info =
			&per_cpu(od_cpu_dbs_info, i);
		struct dbs_work_struct *dbs_work =
			&per_cpu(dbs_refresh_work, i);

		mutex_init(&this_dbs_info->timer_mutex);
		INIT_WORK(&dbs_work->work, dbs_refresh_callback);
		dbs_work->cpu = i;

		atomic_set(&this_dbs_info->src_sync_cpu, -1);
		init_waitqueue_head(&this_dbs_info->sync_wq);

		this_dbs_info->sync_thread = kthread_run(dbs_sync_thread,
							 (void *)i,
							 "dbs_sync/%d", i);
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

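/*
 * Module exit: unregister the governor first so no new timers are armed,
 * then tear down the per-CPU mutexes and sync threads and destroy the
 * workqueue.
 */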
static void __exit cpufreq_gov_dbs_exit(void)
{
	unsigned int i;

	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	for_each_possible_cpu(i) {
		struct cpu_dbs_info_s *this_dbs_info =
			&per_cpu(od_cpu_dbs_info, i);
		mutex_destroy(&this_dbs_info->timer_mutex);
		kthread_stop(this_dbs_info->sync_thread);
	}
	destroy_workqueue(dbs_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);