/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *          Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

static struct cpufreq_governor cpufreq_gov_ondemand;

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find the right frequency to set now with powersave_bias on.
 * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
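/*
 * Worked example (illustrative numbers only, not from any particular
 * platform): with powersave_bias = 100 (i.e. 10%) and a 1 GHz request,
 * the target average is 900 MHz. If the frequency table offers only
 * 800 MHz and 1 GHz around that point, the governor alternates between
 * them, spending half of each sampling period at each frequency so the
 * average comes out near 900 MHz.
 */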
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (by default). If it is, we try to increase the frequency; otherwise
 * we adjust the frequency proportionally to the load.
 */
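/*
 * Worked example of the proportional path below (illustrative numbers):
 * with cpuinfo.min_freq = 500 MHz, cpuinfo.max_freq = 1500 MHz and a
 * measured load of 50%, the governor requests
 * 500 + 50 * (1500 - 500) / 100 = 1000 MHz.
 */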
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_C);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}

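/*
 * Timer callback. With powersave_bias in effect, each sampling period is
 * split in two: the NORMAL_SAMPLE evaluates load and, if a freq_lo was
 * computed, arms a SUB_SAMPLE after freq_hi_jiffies; the SUB_SAMPLE then
 * drops to freq_lo for freq_lo_jiffies, so the time-averaged frequency
 * approximates the bias-reduced target.
 */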
static unsigned int od_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
{
	struct dbs_data *dbs_data = policy->governor_data;
	unsigned int cpu = policy->cpu;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = dbs_info->sample_type;

	if (!modify_all)
		goto max_delay;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* dbs_info->rate_mult);

	return delay;
}

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if
 * the original sampling_rate was 1 second and the user requests a new rate
 * of 10 ms because they need an immediate reaction from the ondemand
 * governor, the governor may not change the sampling rate until up to
 * 1 second later. Thus, when reducing the sampling rate, we need to make
 * the new value effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpumask cpumask;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	/*
	 * Lock governor so that governor start/stop can't execute in parallel.
	 */
	mutex_lock(&od_dbs_cdata.mutex);

	cpumask_copy(&cpumask, cpu_online_mask);

	for_each_cpu(cpu, &cpumask) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		struct cpu_dbs_info *cdbs;
		struct cpu_common_dbs_info *shared;
		unsigned long next_sampling, appointed_at;

		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cdbs = &dbs_info->cdbs;
		shared = cdbs->shared;

		/*
		 * A valid shared and shared->policy means governor hasn't
		 * stopped or exited yet.
		 */
		if (!shared || !shared->policy)
			continue;

		policy = shared->policy;

		/* clear all CPUs of this policy */
		cpumask_andnot(&cpumask, &cpumask, policy->cpus);

		/*
		 * Update sampling rate for CPUs whose policy is governed by
		 * dbs_data. In case of governor_per_policy, only a single
		 * policy will be governed by dbs_data, otherwise there can be
		 * multiple policies that are governed by the same dbs_data.
		 */
		if (dbs_data != policy->governor_data)
			continue;

		/*
		 * Checking this for any CPU should be fine, timers for all of
		 * them are scheduled together.
		 */
		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			gov_cancel_work(shared);
			gov_add_timers(policy, usecs_to_jiffies(new_rate));
		}
	}

	mutex_unlock(&od_dbs_cdata.mutex);
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	od_tuners->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

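/*
 * powersave_bias is expressed in units of 0.1% (0..1000): e.g. a value of
 * 100 biases the selected target frequency 10% below the requested one.
 */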
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In the nohz/micro-accounting case we set the minimum
		 * sampling rate to a fixed (very low) value rather than
		 * deriving it from HZ. The deferrable timer may skip some
		 * samples while idle/sleeping, as intended.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
	.mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
};

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

static struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpu_common_dbs_info *shared;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
		if (!shared)
			continue;

		policy = shared->policy;
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &cpufreq_gov_ondemand;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);