/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>

/*
 * "dbs" is used in this file as shorthand for "demand-based switching".
 * It keeps variable names short and simple.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(0)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the
 * processor's transition latency. The governor will work on any processor
 * with transition latency <= 10 mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 mS (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
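
/*
 * Worked example (illustrative numbers, not from any particular CPU):
 * a processor reporting a transition latency of 100,000 nS (100 uS)
 * gets def_sampling_rate = 100 uS * 1000 = 100,000 uS, i.e. the load
 * is re-evaluated every 100 mS.  MIN_SAMPLING_RATE would then be
 * 50,000 uS and MAX_SAMPLING_RATE 50,000,000 uS.
 */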

static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
	struct cpufreq_policy	*cur_policy;
	unsigned int		prev_cpu_idle_up;	/* idle ticks at last up-check */
	unsigned int		prev_cpu_idle_down;	/* idle ticks at last down-check */
	unsigned int		enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

static DECLARE_MUTEX(dbs_sem);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);

struct dbs_tuners {
	unsigned int		sampling_rate;
	unsigned int		sampling_down_factor;
	unsigned int		up_threshold;
	unsigned int		down_threshold;
	unsigned int		ignore_nice;
	unsigned int		freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold		= DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
};

/*
 * Idle time for a CPU, in kstat tick units.  Time spent in iowait counts
 * as idle; time spent running niced tasks also counts as idle unless
 * ignore_nice is set.
 */
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	return kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		(!dbs_tuners_ins.ignore_nice ?
		 kstat_cpu(cpu).cpustat.nice : 0);
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	down(&dbs_sem);
	dbs_tuners_ins.sampling_down_factor = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
	    input < MIN_FREQUENCY_UP_THRESHOLD ||
	    input <= dbs_tuners_ins.down_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
	    input < MIN_FREQUENCY_DOWN_THRESHOLD ||
	    input >= dbs_tuners_ins.up_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	down(&dbs_sem);
	if (input == dbs_tuners_ins.ignore_nice) {	/* nothing to do */
		up(&dbs_sem);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	up(&dbs_sem);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	down(&dbs_sem);
	dbs_tuners_ins.freq_step = input;
	up(&dbs_sem);

	return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);
define_one_rw(freq_step);

static struct attribute * dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
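
/*
 * With the attribute group named "ondemand", these tunables should show
 * up as files under the policy's cpufreq sysfs directory, e.g.
 * /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 * (path shown for illustration; the exact location depends on the
 * sysfs layout in use).
 */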

/************************** sysfs end ************************/

static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int freq_down_step;
	unsigned int freq_down_sampling_rate;
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * The default safe range is 20% to 80%.
	 * Every sampling_rate, we check:
	 *   - if current idle time is less than 20%, we try to increase
	 *     the frequency.
	 * Every sampling_rate * sampling_down_factor, we check:
	 *   - if current idle time is more than 80%, we try to decrease
	 *     the frequency.
	 *
	 * Any frequency increase takes the CPU to the maximum frequency.
	 * Frequency reduction happens in minimum steps of 5% (default)
	 * of max_frequency.
	 */
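
	/*
	 * Illustration (assuming HZ=1000 and the example defaults above):
	 * with a sampling_rate of 100,000 uS one sample spans ~100 jiffies.
	 * up_threshold=80 gives up_idle_ticks = 20 * 100 = 2000 below; a
	 * CPU that was idle for fewer than 20 jiffies of those ~100
	 * (idle_ticks * 100 < 2000) counts as more than 80% busy.
	 */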

	/* Check for frequency increase */
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
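
	/*
	 * idle_ticks now holds the smallest idle delta of any CPU in this
	 * policy, i.e. the busiest CPU decides whether to ramp up.
	 */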

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346
347 if (idle_ticks < up_idle_ticks) {
Dave Jonesdac1c1a2005-05-31 19:03:49 -0700348 down_skip[cpu] = 0;
Dave Jones790d76f2005-05-31 19:03:49 -0700349 for_each_cpu_mask(j, policy->cpus) {
350 struct cpu_dbs_info_s *j_dbs_info;
351
352 j_dbs_info = &per_cpu(cpu_dbs_info, j);
353 j_dbs_info->prev_cpu_idle_down =
354 j_dbs_info->prev_cpu_idle_up;
355 }
Dave Jonesc11420a2005-05-31 19:03:48 -0700356 /* if we are already at full speed then break out early */
357 if (policy->cur == policy->max)
358 return;
359
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 __cpufreq_driver_target(policy, policy->max,
361 CPUFREQ_RELATION_H);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362 return;
363 }
364
365 /* Check for frequency decrease */
366 down_skip[cpu]++;
367 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
368 return;
369
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* reuse the idle snapshot taken during the last up-check */
		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	down_skip[cpu] = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);
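
	/*
	 * Illustration: with down_threshold=20, the down check covers a
	 * window of sampling_rate * sampling_down_factor uS, and the CPU
	 * must have been idle for more than 80% of that longer window
	 * before a frequency reduction is attempted below.
	 */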

	if (idle_ticks > down_idle_ticks) {
		/*
		 * Break out early if we are already at the lowest speed,
		 * or if we cannot reduce the speed because the user has
		 * set freq_step to zero.
		 */
		if (policy->cur == policy->min || dbs_tuners_ins.freq_step == 0)
			return;

		freq_down_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_down_step == 0))
			freq_down_step = 5;

		__cpufreq_driver_target(policy,
			policy->cur - freq_down_step,
			CPUFREQ_RELATION_H);
		return;
	}
}

/*
 * Sample every online CPU, then re-arm the work so it runs again one
 * sampling period later.
 */
static void do_dbs_timer(void *data)
{
	int i;
	down(&dbs_sem);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	up(&dbs_sem);
}

static inline void dbs_timer_init(void)
{
	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		down(&dbs_sem);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timer schedule work when this governor is
		 * used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency;
			if (latency < 1000)
				latency = 1000;

			def_sampling_rate = (latency / 1000) *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
			dbs_tuners_ins.sampling_rate = def_sampling_rate;
			dbs_tuners_ins.ignore_nice = 0;
			dbs_tuners_ins.freq_step = 5;

			dbs_timer_init();
		}

		up(&dbs_sem);
		break;

	case CPUFREQ_GOV_STOP:
		down(&dbs_sem);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer schedule work when this governor is no
		 * longer used by any CPU.
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		up(&dbs_sem);

		break;

	case CPUFREQ_GOV_LIMITS:
		down(&dbs_sem);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		up(&dbs_sem);
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
	.name		= "ondemand",
	.governor	= cpufreq_governor_dbs,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);