/* arch/arm/mach-msm/cpufreq.c
 *
 * MSM architecture cpufreq driver
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
 * Author: Mike A. Chan <mikechan@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/earlysuspend.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <mach/socinfo.h>

#include "acpuclock.h"

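/*
 * On SMP targets a frequency request is handed to a per-CPU worker so that
 * acpuclk_set_rate() runs on the CPU being scaled; the requester waits on
 * the completion for the worker's status.
 */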
#ifdef CONFIG_SMP
struct cpufreq_work_struct {
        struct work_struct work;
        struct cpufreq_policy *policy;
        struct completion complete;
        int frequency;
        int status;
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
static struct workqueue_struct *msm_cpufreq_wq;
#endif

struct cpufreq_suspend_t {
        struct mutex suspend_mutex;
        int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);

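/* Set via the "mfreq" sysdev attribute; forces every request to policy->max. */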
static int override_cpu;

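/*
 * Perform the actual switch for one CPU: issue the PRECHANGE notification,
 * program the clock via acpuclk_set_rate(), and issue POSTCHANGE only if
 * the switch succeeded.  With override_cpu set, the requested frequency is
 * replaced by policy->max.
 */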
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
        int ret = 0;
        struct cpufreq_freqs freqs;

        freqs.old = policy->cur;
        if (override_cpu) {
                if (policy->cur == policy->max)
                        return 0;
                else
                        freqs.new = policy->max;
        } else
                freqs.new = new_freq;
        freqs.cpu = policy->cpu;
        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        ret = acpuclk_set_rate(policy->cpu, freqs.new, SETRATE_CPUFREQ);
        if (!ret)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return ret;
}

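/*
 * SMP worker body: runs on the target CPU and passes the result back
 * through cpu_work->status and the completion.
 */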
#ifdef CONFIG_SMP
static void set_cpu_work(struct work_struct *work)
{
        struct cpufreq_work_struct *cpu_work =
                container_of(work, struct cpufreq_work_struct, work);

        cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
        complete(&cpu_work->complete);
}
#endif

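/*
 * cpufreq ->target hook.  Changes are serialized against suspend/resume by
 * the per-CPU suspend_mutex and rejected while suspended.  On SMP, unless
 * the caller is already bound to policy->cpu, the request is queued to that
 * CPU's worker and the result awaited.
 */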
static int msm_cpufreq_target(struct cpufreq_policy *policy,
                              unsigned int target_freq,
                              unsigned int relation)
{
        int ret = -EFAULT;
        int index;
        struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
        struct cpufreq_work_struct *cpu_work = NULL;
        cpumask_var_t mask;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        if (!cpu_active(policy->cpu)) {
                pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
                free_cpumask_var(mask);
                return -ENODEV;
        }
#endif

        mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);

        if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
                pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
                         policy->cpu);
                ret = -EFAULT;
                goto done;
        }

        table = cpufreq_frequency_get_table(policy->cpu);
        if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
                                           &index)) {
                pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
                ret = -EINVAL;
                goto done;
        }

#ifdef CONFIG_CPU_FREQ_DEBUG
        pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
                 policy->cpu, target_freq, relation,
                 policy->min, policy->max, table[index].frequency);
#endif

#ifdef CONFIG_SMP
        cpu_work = &per_cpu(cpufreq_work, policy->cpu);
        cpu_work->policy = policy;
        cpu_work->frequency = table[index].frequency;
        cpu_work->status = -ENODEV;

        cpumask_clear(mask);
        cpumask_set_cpu(policy->cpu, mask);
        if (cpumask_equal(mask, &current->cpus_allowed)) {
                ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
                free_cpumask_var(mask);
                goto done;
        } else {
                cancel_work_sync(&cpu_work->work);
                INIT_COMPLETION(cpu_work->complete);
                queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
                wait_for_completion(&cpu_work->complete);
        }

        free_cpumask_var(mask);
        ret = cpu_work->status;
#else
        ret = set_cpu_freq(policy, table[index].frequency);
#endif

done:
        mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
        return ret;
}

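/* cpufreq ->verify hook: clamp the policy to the hardware limits. */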
static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
        return 0;
}

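/*
 * cpufreq ->init hook: derive the cpuinfo limits from the frequency table,
 * move the CPU onto a listed frequency if it is not already on one, record
 * the transition latency, and (on SMP) set up this CPU's worker.
 */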
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
        int cur_freq;
        int index;
        struct cpufreq_frequency_table *table;
#ifdef CONFIG_SMP
        struct cpufreq_work_struct *cpu_work = NULL;
#endif

        if (cpu_is_apq8064())
                return -ENODEV;

        table = cpufreq_frequency_get_table(policy->cpu);
        if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
                policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
                policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
        }
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
        policy->min = CONFIG_MSM_CPU_FREQ_MIN;
        policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

        cur_freq = acpuclk_get_rate(policy->cpu);
        if (cpufreq_frequency_table_target(policy, table, cur_freq,
                                           CPUFREQ_RELATION_H, &index)) {
                pr_info("cpufreq: cpu%d at invalid freq: %d\n",
                        policy->cpu, cur_freq);
                return -EINVAL;
        }

        if (cur_freq != table[index].frequency) {
                int ret = 0;
                ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
                                       SETRATE_CPUFREQ);
                if (ret)
                        return ret;
                pr_info("cpufreq: cpu%d init at %d switching to %d\n",
                        policy->cpu, cur_freq, table[index].frequency);
                cur_freq = table[index].frequency;
        }

        policy->cur = cur_freq;

        policy->cpuinfo.transition_latency =
                acpuclk_get_switch_time() * NSEC_PER_USEC;
#ifdef CONFIG_SMP
        cpu_work = &per_cpu(cpufreq_work, policy->cpu);
        INIT_WORK(&cpu_work->work, set_cpu_work);
        init_completion(&cpu_work->complete);
#endif

        return 0;
}

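/*
 * Suspend/resume handling: the suspend path raises each CPU's
 * device_suspended flag under its suspend_mutex so it cannot race an
 * in-flight msm_cpufreq_target(); the resume path simply clears the flags.
 */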
static int msm_cpufreq_suspend(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
                per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
                mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
        }

        return NOTIFY_DONE;
}

static int msm_cpufreq_resume(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
        }

        return NOTIFY_DONE;
}

static int msm_cpufreq_pm_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
{
        switch (event) {
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                return msm_cpufreq_resume();
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                return msm_cpufreq_suspend();
        default:
                return NOTIFY_DONE;
        }
}

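/*
 * "mfreq" sysdev class attribute (write-only): any non-zero value enables
 * the max-frequency override used by set_cpu_freq(), zero disables it.
 */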
static ssize_t store_mfreq(struct sysdev_class *class,
                           struct sysdev_class_attribute *attr,
                           const char *buf, size_t count)
{
        u64 val;

        if (strict_strtoull(buf, 0, &val) < 0) {
                pr_err("Invalid parameter to mfreq\n");
                return -EINVAL;
        }
        if (val)
                override_cpu = 1;
        else
                override_cpu = 0;
        return count;
}

static SYSDEV_CLASS_ATTR(mfreq, 0200, NULL, store_mfreq);

static struct cpufreq_driver msm_cpufreq_driver = {
        /* lps (loops-per-second) calculations are handled here, hence
         * CPUFREQ_CONST_LOOPS. */
        .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
        .init = msm_cpufreq_init,
        .verify = msm_cpufreq_verify,
        .target = msm_cpufreq_target,
        .name = "msm",
};

static struct notifier_block msm_cpufreq_pm_notifier = {
        .notifier_call = msm_cpufreq_pm_event,
};

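/*
 * Registration: create the "mfreq" attribute, initialize the per-CPU
 * suspend state (and the "msm-cpufreq" workqueue on SMP), hook the PM
 * notifier, and register the driver with the cpufreq core.
 */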
static int __init msm_cpufreq_register(void)
{
        int cpu;

        int err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
                                    &attr_mfreq.attr);
        if (err)
                pr_err("Failed to create sysfs mfreq\n");

        for_each_possible_cpu(cpu) {
                mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
                per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
        }

#ifdef CONFIG_SMP
        msm_cpufreq_wq = create_workqueue("msm-cpufreq");
#endif

        register_pm_notifier(&msm_cpufreq_pm_notifier);
        return cpufreq_register_driver(&msm_cpufreq_driver);
}

late_initcall(msm_cpufreq_register);