blob: 24a1e969e4eb1e408ec2671a618428bac872859e [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/cpufreq.c
2 *
3 * MSM architecture cpufreq driver
4 *
5 * Copyright (C) 2007 Google, Inc.
Duy Truong790f06d2013-02-13 16:38:12 -08006 * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007 * Author: Mike A. Chan <mikechan@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/earlysuspend.h>
21#include <linux/init.h>
Praveen Chidambaram696a5612012-05-25 17:29:11 -060022#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023#include <linux/cpufreq.h>
24#include <linux/workqueue.h>
25#include <linux/completion.h>
26#include <linux/cpu.h>
27#include <linux/cpumask.h>
28#include <linux/sched.h>
29#include <linux/suspend.h>
Stepan Moskovchenkoaf25dd92011-08-05 18:12:48 -070030#include <mach/socinfo.h>
Praveen Chidambaram696a5612012-05-25 17:29:11 -060031#include <mach/cpufreq.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33#include "acpuclock.h"
34
/*
 * Per-CPU work item used to execute a frequency change on the CPU being
 * scaled itself: msm_cpufreq_target() queues this on policy->cpu's
 * workqueue when the caller is not already pinned to that CPU.
 */
struct cpufreq_work_struct {
	struct work_struct work;
	struct cpufreq_policy *policy;	/* policy of the CPU to scale */
	struct completion complete;	/* signalled when the change finished */
	int frequency;			/* requested frequency (cpufreq table units, kHz) */
	int status;			/* result of set_cpu_freq() */
};

static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
static struct workqueue_struct *msm_cpufreq_wq;
45
/*
 * Per-CPU suspend/hotplug bookkeeping. suspend_mutex serializes frequency
 * changes against suspend and CPU-down; device_suspended != 0 means
 * frequency changes for this CPU must be rejected.
 */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;
	int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);
52
/*
 * Per-CPU frequency limits. min/max come from the cpufreq frequency
 * table; allowed_min/allowed_max are the (possibly narrower) bounds set
 * via msm_cpufreq_set_freq_limits(). limits_init is a one-time-init flag.
 */
struct cpu_freq {
	uint32_t max;
	uint32_t min;
	uint32_t allowed_max;
	uint32_t allowed_min;
	uint32_t limits_init;
};

static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
62
/*
 * Perform the actual frequency change for policy->cpu.
 *
 * Clamps new_freq to the per-CPU allowed_{min,max} window (when limits
 * are initialized), issues the cpufreq PRE/POST change notifications and
 * calls into acpuclk_set_rate(). While ramping the frequency *up*, the
 * calling task is temporarily boosted to SCHED_FIFO so it cannot be
 * starved inside acpuclk_set_rate(); the previous scheduling policy and
 * RT priority are restored afterwards.
 *
 * Must be called on (or pinned to) the CPU being scaled — see
 * msm_cpufreq_target(). Returns 0 on success or the acpuclk_set_rate()
 * error code; POSTCHANGE is only notified on success.
 */
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
	int ret = 0;
	int saved_sched_policy = -EINVAL;	/* -EINVAL == "nothing saved" */
	int saved_sched_rt_prio = -EINVAL;
	struct cpufreq_freqs freqs;
	struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (limit->limits_init) {
		if (new_freq > limit->allowed_max) {
			new_freq = limit->allowed_max;
			pr_debug("max: limiting freq to %d\n", new_freq);
		}

		if (new_freq < limit->allowed_min) {
			new_freq = limit->allowed_min;
			pr_debug("min: limiting freq to %d\n", new_freq);
		}
	}

	freqs.old = policy->cur;
	freqs.new = new_freq;
	freqs.cpu = policy->cpu;

	/*
	 * Put the caller into SCHED_FIFO priority to avoid cpu starvation
	 * in the acpuclk_set_rate path while increasing frequencies
	 */

	if (freqs.new > freqs.old && current->policy != SCHED_FIFO) {
		saved_sched_policy = current->policy;
		saved_sched_rt_prio = current->rt_priority;
		sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	}

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
	if (!ret)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	/* Restore priority after clock ramp-up */
	if (freqs.new > freqs.old && saved_sched_policy >= 0) {
		param.sched_priority = saved_sched_rt_prio;
		sched_setscheduler_nocheck(current, saved_sched_policy, &param);
	}
	return ret;
}
112
Praveen Chidambaram836f8452013-03-11 14:50:06 -0600113static void set_cpu_work(struct work_struct *work)
114{
115 struct cpufreq_work_struct *cpu_work =
116 container_of(work, struct cpufreq_work_struct, work);
117
118 cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
119 complete(&cpu_work->complete);
120}
121
/*
 * cpufreq "target" callback: move policy->cpu to the table frequency
 * selected by target_freq/relation.
 *
 * acpuclk_set_rate() is run on the CPU being scaled: if the caller is
 * already affine to exactly that CPU the change is made inline,
 * otherwise it is dispatched to the per-CPU worker on policy->cpu and
 * waited for synchronously via a completion.
 *
 * Returns 0 on success; -ENODEV if the CPU is not active, -ENOMEM on
 * cpumask allocation failure, -EINVAL for a frequency outside the
 * table, -EFAULT while the CPU is marked suspended.
 */
static int msm_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	int ret = -EFAULT;
	int index;
	struct cpufreq_frequency_table *table;

	struct cpufreq_work_struct *cpu_work = NULL;
	cpumask_var_t mask;

	if (!cpu_active(policy->cpu)) {
		pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
		return -ENODEV;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Serializes against suspend/resume and CPU-down for this CPU */
	mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);

	if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change "
				"in suspend.\n", policy->cpu);
		ret = -EFAULT;
		goto done;
	}

	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
			&index)) {
		pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
		ret = -EINVAL;
		goto done;
	}

	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		policy->cpu, target_freq, relation,
		policy->min, policy->max, table[index].frequency);

	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	cpu_work->policy = policy;
	cpu_work->frequency = table[index].frequency;
	cpu_work->status = -ENODEV;

	/*
	 * Fast path: if current's affinity mask is exactly {policy->cpu},
	 * we are guaranteed to be running there and can set the frequency
	 * directly. Otherwise punt to the per-CPU workqueue and wait.
	 */
	cpumask_clear(mask);
	cpumask_set_cpu(policy->cpu, mask);
	if (cpumask_equal(mask, &current->cpus_allowed)) {
		ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
		goto done;
	} else {
		/* Ensure no stale run is in flight before re-arming */
		cancel_work_sync(&cpu_work->work);
		INIT_COMPLETION(cpu_work->complete);
		queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
		wait_for_completion(&cpu_work->complete);
	}

	ret = cpu_work->status;

done:
	free_cpumask_var(mask);
	mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
	return ret;
}
186
187static int msm_cpufreq_verify(struct cpufreq_policy *policy)
188{
189 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
190 policy->cpuinfo.max_freq);
191 return 0;
192}
193
/* cpufreq "get" callback: report the CPU's current rate from acpuclock. */
static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
	unsigned int cur = acpuclk_get_rate(cpu);

	return cur;
}
198
199static inline int msm_cpufreq_limits_init(void)
200{
201 int cpu = 0;
202 int i = 0;
203 struct cpufreq_frequency_table *table = NULL;
204 uint32_t min = (uint32_t) -1;
205 uint32_t max = 0;
206 struct cpu_freq *limit = NULL;
207
208 for_each_possible_cpu(cpu) {
209 limit = &per_cpu(cpu_freq_info, cpu);
210 table = cpufreq_frequency_get_table(cpu);
211 if (table == NULL) {
212 pr_err("%s: error reading cpufreq table for cpu %d\n",
213 __func__, cpu);
214 continue;
215 }
216 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
217 if (table[i].frequency > max)
218 max = table[i].frequency;
219 if (table[i].frequency < min)
220 min = table[i].frequency;
221 }
222 limit->allowed_min = min;
223 limit->allowed_max = max;
224 limit->min = min;
225 limit->max = max;
226 limit->limits_init = 1;
227 }
228
229 return 0;
230}
231
232int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
233{
234 struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);
235
236 if (!limit->limits_init)
237 msm_cpufreq_limits_init();
238
239 if ((min != MSM_CPUFREQ_NO_LIMIT) &&
240 min >= limit->min && min <= limit->max)
241 limit->allowed_min = min;
242 else
243 limit->allowed_min = limit->min;
244
245
246 if ((max != MSM_CPUFREQ_NO_LIMIT) &&
247 max <= limit->max && max >= limit->min)
248 limit->allowed_max = max;
249 else
250 limit->allowed_max = limit->max;
251
252 pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
253 __func__, cpu,
254 limit->allowed_min, limit->allowed_max);
255
256 return 0;
257}
258EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
259
/*
 * cpufreq "init" callback: runs once per policy when the driver binds a
 * CPU. Validates the frequency table, optionally applies the Kconfig
 * min/max clamp, snaps the current clock rate onto a table entry
 * (changing the rate if it sits between entries), publishes the
 * transition latency and sets up the per-CPU worker used by
 * msm_cpufreq_target().
 */
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	struct cpufreq_frequency_table *table;
	struct cpufreq_work_struct *cpu_work = NULL;

	table = cpufreq_frequency_get_table(policy->cpu);
	if (table == NULL)
		return -ENODEV;
	/*
	 * In 8625, 8610, and 8226 both cpu core's frequency can not
	 * be changed independently. Each cpu is bound to
	 * same frequency. Hence set the cpumask to all cpu.
	 */
	if (cpu_is_msm8625() || cpu_is_msm8625q() || cpu_is_msm8226()
		|| cpu_is_msm8610())
		cpumask_setall(policy->cpus);

	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
		/* Table validation failed: fall back to the Kconfig range */
		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
	}
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif

	/* Snap the current rate to a table entry, trying both directions */
	cur_freq = acpuclk_get_rate(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, cur_freq,
		CPUFREQ_RELATION_H, &index) &&
	    cpufreq_frequency_table_target(policy, table, cur_freq,
		CPUFREQ_RELATION_L, &index)) {
		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
				policy->cpu, cur_freq);
		return -EINVAL;
	}

	if (cur_freq != table[index].frequency) {
		int ret = 0;
		ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
				SETRATE_CPUFREQ);
		if (ret)
			return ret;
		pr_info("cpufreq: cpu%d init at %d switching to %d\n",
				policy->cpu, cur_freq, table[index].frequency);
		cur_freq = table[index].frequency;
	}

	policy->cur = cur_freq;

	/* acpuclk reports switch time in usec; cpufreq wants nsec */
	policy->cpuinfo.transition_latency =
		acpuclk_get_switch_time() * NSEC_PER_USEC;

	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
	INIT_WORK(&cpu_work->work, set_cpu_work);
	init_completion(&cpu_work->complete);

	return 0;
}
322
Praveen Chidambarambe01a172012-09-18 13:42:40 -0600323static int __cpuinit msm_cpufreq_cpu_callback(struct notifier_block *nfb,
324 unsigned long action, void *hcpu)
325{
326 unsigned int cpu = (unsigned long)hcpu;
327
328 switch (action) {
329 case CPU_ONLINE:
330 case CPU_ONLINE_FROZEN:
331 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
332 break;
333 case CPU_DOWN_PREPARE:
334 case CPU_DOWN_PREPARE_FROZEN:
335 mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
336 per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
337 mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
338 break;
339 case CPU_DOWN_FAILED:
340 case CPU_DOWN_FAILED_FROZEN:
341 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
342 break;
343 }
344
345 return NOTIFY_OK;
346}
347
/*
 * __refdata suppresses section-mismatch warnings: the callback is
 * __cpuinit and may be discarded when CONFIG_HOTPLUG_CPU is off.
 */
static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
	.notifier_call = msm_cpufreq_cpu_callback,
};
351
Anji Jonnalaf8732322012-12-13 14:03:54 +0530352/*
353 * Define suspend/resume for cpufreq_driver. Kernel will call
354 * these during suspend/resume with interrupts disabled. This
355 * helps the suspend/resume variable get's updated before cpufreq
356 * governor tries to change the frequency after coming out of suspend.
357 */
358static int msm_cpufreq_suspend(struct cpufreq_policy *policy)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359{
360 int cpu;
361
362 for_each_possible_cpu(cpu) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700363 per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700364 }
365
Anji Jonnalaf8732322012-12-13 14:03:54 +0530366 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367}
368
Anji Jonnalaf8732322012-12-13 14:03:54 +0530369static int msm_cpufreq_resume(struct cpufreq_policy *policy)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700370{
371 int cpu;
372
373 for_each_possible_cpu(cpu) {
374 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
375 }
376
Anji Jonnalaf8732322012-12-13 14:03:54 +0530377 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378}
379
/* sysfs attributes: exposes scaling_available_frequencies per policy */
static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
384
/*
 * cpufreq driver ops. CPUFREQ_STICKY keeps the driver registered even
 * if no CPU attaches at probe time; CPUFREQ_CONST_LOOPS tells the core
 * that loops_per_jiffy does not vary with frequency.
 */
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.get = msm_cpufreq_get_freq,
	.suspend = msm_cpufreq_suspend,
	.resume = msm_cpufreq_resume,
	.name = "msm",
	.attr = msm_freq_attr,
};
397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398static int __init msm_cpufreq_register(void)
399{
400 int cpu;
401
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402 for_each_possible_cpu(cpu) {
403 mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
404 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
405 }
406
Praveen Chidambaram836f8452013-03-11 14:50:06 -0600407 msm_cpufreq_wq = create_workqueue("msm-cpufreq");
Narayanan Gopalakrishnan56f5a6d2012-07-17 16:07:50 -0700408 register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700410 return cpufreq_register_driver(&msm_cpufreq_driver);
411}
412
Praveen Chidambaramfeea1b82013-04-30 15:25:08 -0600413device_initcall(msm_cpufreq_register);