blob: d31038082003b746882d157081a0d1338ed8c2af [file] [log] [blame]
Junjie Wub8be3332015-02-03 14:31:09 -08001/* drivers/cpufreq/qcom-cpufreq.c
Stephen Boydce5a5782014-08-01 15:39:54 -07002 *
3 * MSM architecture cpufreq driver
4 *
5 * Copyright (C) 2007 Google, Inc.
Santosh Mardiefe63672017-01-12 16:48:53 +05306 * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
Stephen Boydce5a5782014-08-01 15:39:54 -07007 * Author: Mike A. Chan <mikechan@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/cpufreq.h>
23#include <linux/cpu.h>
24#include <linux/cpumask.h>
Stephen Boydce5a5782014-08-01 15:39:54 -070025#include <linux/suspend.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/platform_device.h>
29#include <linux/of.h>
30#include <trace/events/power.h>
31
/* NOTE(review): unused in the code visible in this file — confirm callers. */
static DEFINE_MUTEX(l2bw_lock);

static struct clk *cpu_clk[NR_CPUS];	/* per-CPU core clocks */
static struct clk *l2_clk;		/* shared L2 clock (optional) */
/* Per-CPU frequency table; CPUs sharing a clock share one table. */
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
/* Set once probe has resolved all CPU clocks; gates the hotplug callbacks. */
static bool hotplug_ready;

/* Per-CPU suspend state: frequency changes are refused while suspended. */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;	/* serializes target() vs suspend/resume */
	int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
/* Cache of the last resolve_freq() lookup, keyed by the cluster's first CPU. */
static DEFINE_PER_CPU(int, cached_resolve_idx);
static DEFINE_PER_CPU(unsigned int, cached_resolve_freq);
Stephen Boydce5a5782014-08-01 15:39:54 -070047
48static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
49 unsigned int index)
50{
51 int ret = 0;
Stephen Boydce5a5782014-08-01 15:39:54 -070052 struct cpufreq_freqs freqs;
Stephen Boydce5a5782014-08-01 15:39:54 -070053 unsigned long rate;
54
55 freqs.old = policy->cur;
56 freqs.new = new_freq;
57 freqs.cpu = policy->cpu;
58
Stephen Boydce5a5782014-08-01 15:39:54 -070059 trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
60 cpufreq_freq_transition_begin(policy, &freqs);
61
62 rate = new_freq * 1000;
63 rate = clk_round_rate(cpu_clk[policy->cpu], rate);
64 ret = clk_set_rate(cpu_clk[policy->cpu], rate);
65 cpufreq_freq_transition_end(policy, &freqs, ret);
66 if (!ret)
67 trace_cpu_frequency_switch_end(policy->cpu);
68
Stephen Boydce5a5782014-08-01 15:39:54 -070069 return ret;
70}
71
72static int msm_cpufreq_target(struct cpufreq_policy *policy,
73 unsigned int target_freq,
74 unsigned int relation)
75{
Junjie Wu72881072015-06-10 17:57:07 -070076 int ret = 0;
Stephen Boydce5a5782014-08-01 15:39:54 -070077 int index;
78 struct cpufreq_frequency_table *table;
Pavankumar Kondetid9d13ae2017-10-18 08:28:16 +053079 int first_cpu = cpumask_first(policy->related_cpus);
Stephen Boydce5a5782014-08-01 15:39:54 -070080
81 mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
82
Junjie Wu72881072015-06-10 17:57:07 -070083 if (target_freq == policy->cur)
84 goto done;
85
Stephen Boydce5a5782014-08-01 15:39:54 -070086 if (per_cpu(suspend_data, policy->cpu).device_suspended) {
87 pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
88 policy->cpu);
89 ret = -EFAULT;
90 goto done;
91 }
92
Stephen Boydcf930d62017-03-02 14:22:37 -080093 table = policy->freq_table;
Pavankumar Kondetid9d13ae2017-10-18 08:28:16 +053094 if (per_cpu(cached_resolve_freq, first_cpu) == target_freq)
95 index = per_cpu(cached_resolve_idx, first_cpu);
96 else
97 index = cpufreq_frequency_table_target(policy, target_freq,
98 relation);
Stephen Boydce5a5782014-08-01 15:39:54 -070099
100 pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
101 policy->cpu, target_freq, relation,
102 policy->min, policy->max, table[index].frequency);
103
104 ret = set_cpu_freq(policy, table[index].frequency,
105 table[index].driver_data);
106done:
107 mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
108 return ret;
109}
110
Pavankumar Kondeti59386c82017-09-04 11:51:19 +0530111static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy,
112 unsigned int target_freq)
113{
114 int index;
Pavankumar Kondetid9d13ae2017-10-18 08:28:16 +0530115 int first_cpu = cpumask_first(policy->related_cpus);
116 unsigned int freq;
Pavankumar Kondeti59386c82017-09-04 11:51:19 +0530117
118 index = cpufreq_frequency_table_target(policy, target_freq,
119 CPUFREQ_RELATION_L);
Pavankumar Kondetid9d13ae2017-10-18 08:28:16 +0530120 freq = policy->freq_table[index].frequency;
121
122 per_cpu(cached_resolve_idx, first_cpu) = index;
123 per_cpu(cached_resolve_freq, first_cpu) = freq;
124
125 return freq;
Pavankumar Kondeti59386c82017-09-04 11:51:19 +0530126}
127
Stephen Boydce5a5782014-08-01 15:39:54 -0700128static int msm_cpufreq_verify(struct cpufreq_policy *policy)
129{
130 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
131 policy->cpuinfo.max_freq);
132 return 0;
133}
134
135static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
136{
137 return clk_get_rate(cpu_clk[cpu]) / 1000;
138}
139
140static int msm_cpufreq_init(struct cpufreq_policy *policy)
141{
142 int cur_freq;
143 int index;
144 int ret = 0;
145 struct cpufreq_frequency_table *table =
146 per_cpu(freq_table, policy->cpu);
147 int cpu;
148
149 /*
150 * In some SoC, some cores are clocked by same source, and their
151 * frequencies can not be changed independently. Find all other
152 * CPUs that share same clock, and mark them as controlled by
153 * same policy.
154 */
155 for_each_possible_cpu(cpu)
156 if (cpu_clk[cpu] == cpu_clk[policy->cpu])
157 cpumask_set_cpu(cpu, policy->cpus);
158
159 ret = cpufreq_table_validate_and_show(policy, table);
160 if (ret) {
161 pr_err("cpufreq: failed to get policy min/max\n");
162 return ret;
163 }
164
165 cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
166
Stephen Boyd4ade5232017-03-02 14:24:13 -0800167 index = cpufreq_frequency_table_target(policy, cur_freq,
168 CPUFREQ_RELATION_H);
Stephen Boydce5a5782014-08-01 15:39:54 -0700169 /*
170 * Call set_cpu_freq unconditionally so that when cpu is set to
171 * online, frequency limit will always be updated.
172 */
173 ret = set_cpu_freq(policy, table[index].frequency,
174 table[index].driver_data);
175 if (ret)
176 return ret;
177 pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
178 policy->cpu, cur_freq, table[index].frequency);
179 policy->cur = table[index].frequency;
180
181 return 0;
182}
183
Stephen Boyd6208a582017-03-02 15:09:13 -0800184static int qcom_cpufreq_dead_cpu(unsigned int cpu)
Stephen Boydce5a5782014-08-01 15:39:54 -0700185{
Stephen Boyd6208a582017-03-02 15:09:13 -0800186 /* Fail hotplug until this driver can get CPU clocks */
187 if (!hotplug_ready)
188 return -EINVAL;
189
190 clk_unprepare(cpu_clk[cpu]);
191 clk_unprepare(l2_clk);
192 return 0;
193}
194
195static int qcom_cpufreq_up_cpu(unsigned int cpu)
196{
Stephen Boydce5a5782014-08-01 15:39:54 -0700197 int rc;
198
199 /* Fail hotplug until this driver can get CPU clocks */
200 if (!hotplug_ready)
Stephen Boyd6208a582017-03-02 15:09:13 -0800201 return -EINVAL;
Stephen Boydce5a5782014-08-01 15:39:54 -0700202
Stephen Boyd6208a582017-03-02 15:09:13 -0800203 rc = clk_prepare(l2_clk);
204 if (rc < 0)
205 return rc;
206 rc = clk_prepare(cpu_clk[cpu]);
207 if (rc < 0)
Stephen Boydce5a5782014-08-01 15:39:54 -0700208 clk_unprepare(l2_clk);
Stephen Boyd6208a582017-03-02 15:09:13 -0800209 return rc;
Stephen Boydce5a5782014-08-01 15:39:54 -0700210}
211
Stephen Boyd6208a582017-03-02 15:09:13 -0800212static int qcom_cpufreq_dying_cpu(unsigned int cpu)
213{
214 /* Fail hotplug until this driver can get CPU clocks */
215 if (!hotplug_ready)
216 return -EINVAL;
217
218 clk_disable(cpu_clk[cpu]);
219 clk_disable(l2_clk);
220 return 0;
221}
222
223static int qcom_cpufreq_starting_cpu(unsigned int cpu)
224{
225 int rc;
226
227 /* Fail hotplug until this driver can get CPU clocks */
228 if (!hotplug_ready)
229 return -EINVAL;
230
231 rc = clk_enable(l2_clk);
232 if (rc < 0)
233 return rc;
234 rc = clk_enable(cpu_clk[cpu]);
235 if (rc < 0)
236 clk_disable(l2_clk);
237 return rc;
238}
Stephen Boydce5a5782014-08-01 15:39:54 -0700239
240static int msm_cpufreq_suspend(void)
241{
242 int cpu;
243
244 for_each_possible_cpu(cpu) {
245 mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
246 per_cpu(suspend_data, cpu).device_suspended = 1;
247 mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
248 }
249
250 return NOTIFY_DONE;
251}
252
253static int msm_cpufreq_resume(void)
254{
Junjie Wud5261b02014-12-01 21:21:00 -0800255 int cpu, ret;
256 struct cpufreq_policy policy;
Stephen Boydce5a5782014-08-01 15:39:54 -0700257
258 for_each_possible_cpu(cpu) {
259 per_cpu(suspend_data, cpu).device_suspended = 0;
260 }
261
Junjie Wud5261b02014-12-01 21:21:00 -0800262 /*
263 * Freq request might be rejected during suspend, resulting
264 * in policy->cur violating min/max constraint.
265 * Correct the frequency as soon as possible.
266 */
267 get_online_cpus();
268 for_each_online_cpu(cpu) {
269 ret = cpufreq_get_policy(&policy, cpu);
270 if (ret)
271 continue;
272 if (policy.cur <= policy.max && policy.cur >= policy.min)
273 continue;
274 ret = cpufreq_update_policy(cpu);
275 if (ret)
276 pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
277 cpu);
278 else
279 pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
280 cpu);
281 }
282 put_online_cpus();
283
Stephen Boydce5a5782014-08-01 15:39:54 -0700284 return NOTIFY_DONE;
285}
286
287static int msm_cpufreq_pm_event(struct notifier_block *this,
288 unsigned long event, void *ptr)
289{
290 switch (event) {
291 case PM_POST_HIBERNATION:
292 case PM_POST_SUSPEND:
293 return msm_cpufreq_resume();
294 case PM_HIBERNATION_PREPARE:
295 case PM_SUSPEND_PREPARE:
296 return msm_cpufreq_suspend();
297 default:
298 return NOTIFY_DONE;
299 }
300}
301
/* Registered in probe; suspends/resumes scaling around system sleep. */
static struct notifier_block msm_cpufreq_pm_notifier = {
	.notifier_call = msm_cpufreq_pm_event,
};

/* Per-policy sysfs attributes (scaling_available_frequencies). */
static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.resolve_freq = msm_cpufreq_resolve_freq,
	.get = msm_cpufreq_get_freq,
	.name = "msm",
	.attr = msm_freq_attr,
};
322
323static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
324 char *tbl_name, int cpu)
325{
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700326 int ret, nf, i, j;
Stephen Boydce5a5782014-08-01 15:39:54 -0700327 u32 *data;
328 struct cpufreq_frequency_table *ftbl;
329
330 /* Parse list of usable CPU frequencies. */
331 if (!of_find_property(dev->of_node, tbl_name, &nf))
332 return ERR_PTR(-EINVAL);
333 nf /= sizeof(*data);
334
335 if (nf == 0)
336 return ERR_PTR(-EINVAL);
337
338 data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
339 if (!data)
340 return ERR_PTR(-ENOMEM);
341
342 ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
343 if (ret)
344 return ERR_PTR(ret);
345
346 ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
347 if (!ftbl)
348 return ERR_PTR(-ENOMEM);
349
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700350 j = 0;
Stephen Boydce5a5782014-08-01 15:39:54 -0700351 for (i = 0; i < nf; i++) {
352 unsigned long f;
353
354 f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
355 if (IS_ERR_VALUE(f))
356 break;
357 f /= 1000;
358
359 /*
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700360 * Don't repeat frequencies if they round up to the same clock
361 * frequency.
Stephen Boydce5a5782014-08-01 15:39:54 -0700362 *
Stephen Boydce5a5782014-08-01 15:39:54 -0700363 */
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700364 if (j > 0 && f <= ftbl[j - 1].frequency)
365 continue;
Stephen Boydce5a5782014-08-01 15:39:54 -0700366
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700367 ftbl[j].driver_data = j;
368 ftbl[j].frequency = f;
369 j++;
Stephen Boydce5a5782014-08-01 15:39:54 -0700370 }
371
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700372 ftbl[j].driver_data = j;
373 ftbl[j].frequency = CPUFREQ_TABLE_END;
Stephen Boydce5a5782014-08-01 15:39:54 -0700374
375 devm_kfree(dev, data);
376
377 return ftbl;
378}
379
380static int msm_cpufreq_probe(struct platform_device *pdev)
381{
382 struct device *dev = &pdev->dev;
383 char clk_name[] = "cpu??_clk";
384 char tbl_name[] = "qcom,cpufreq-table-??";
385 struct clk *c;
Stephen Boyd072b57e2017-03-22 18:12:45 -0700386 int cpu, ret;
Stephen Boydce5a5782014-08-01 15:39:54 -0700387 struct cpufreq_frequency_table *ftbl;
388
389 l2_clk = devm_clk_get(dev, "l2_clk");
390 if (IS_ERR(l2_clk))
391 l2_clk = NULL;
392
393 for_each_possible_cpu(cpu) {
394 snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
395 c = devm_clk_get(dev, clk_name);
Santosh Mardiefe63672017-01-12 16:48:53 +0530396 if (cpu == 0 && IS_ERR(c))
Stephen Boydce5a5782014-08-01 15:39:54 -0700397 return PTR_ERR(c);
Santosh Mardiefe63672017-01-12 16:48:53 +0530398 else if (IS_ERR(c))
399 c = cpu_clk[cpu-1];
Stephen Boydce5a5782014-08-01 15:39:54 -0700400 cpu_clk[cpu] = c;
401 }
402 hotplug_ready = true;
403
404 /* Use per-policy governor tunable for some targets */
405 if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
406 msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
407
408 /* Parse commong cpufreq table for all CPUs */
409 ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
410 if (!IS_ERR(ftbl)) {
411 for_each_possible_cpu(cpu)
412 per_cpu(freq_table, cpu) = ftbl;
413 return 0;
414 }
415
416 /*
417 * No common table. Parse individual tables for each unique
418 * CPU clock.
419 */
420 for_each_possible_cpu(cpu) {
421 snprintf(tbl_name, sizeof(tbl_name),
422 "qcom,cpufreq-table-%d", cpu);
423 ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
424
425 /* CPU0 must contain freq table */
426 if (cpu == 0 && IS_ERR(ftbl)) {
427 dev_err(dev, "Failed to parse CPU0's freq table\n");
428 return PTR_ERR(ftbl);
429 }
430 if (cpu == 0) {
431 per_cpu(freq_table, cpu) = ftbl;
432 continue;
433 }
434
435 if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
436 dev_err(dev, "Failed to parse CPU%d's freq table\n",
437 cpu);
438 return PTR_ERR(ftbl);
439 }
440
441 /* Use previous CPU's table if it shares same clock */
442 if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
443 if (!IS_ERR(ftbl)) {
444 dev_warn(dev, "Conflicting tables for CPU%d\n",
445 cpu);
446 devm_kfree(dev, ftbl);
447 }
448 ftbl = per_cpu(freq_table, cpu - 1);
449 }
450 per_cpu(freq_table, cpu) = ftbl;
451 }
452
Stephen Boyd072b57e2017-03-22 18:12:45 -0700453 ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
454 if (ret)
455 return ret;
456
457 ret = cpufreq_register_driver(&msm_cpufreq_driver);
458 if (ret)
459 unregister_pm_notifier(&msm_cpufreq_pm_notifier);
460
461 return ret;
Stephen Boydce5a5782014-08-01 15:39:54 -0700462}
463
464static const struct of_device_id msm_cpufreq_match_table[] = {
465 { .compatible = "qcom,msm-cpufreq" },
466 {}
467};
468
469static struct platform_driver msm_cpufreq_plat_driver = {
470 .probe = msm_cpufreq_probe,
471 .driver = {
472 .name = "msm-cpufreq",
473 .of_match_table = msm_cpufreq_match_table,
474 },
475};
476
477static int __init msm_cpufreq_register(void)
478{
479 int cpu, rc;
480
481 for_each_possible_cpu(cpu) {
482 mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
483 per_cpu(suspend_data, cpu).device_suspended = 0;
Pavankumar Kondetid9d13ae2017-10-18 08:28:16 +0530484 per_cpu(cached_resolve_freq, cpu) = UINT_MAX;
Stephen Boydce5a5782014-08-01 15:39:54 -0700485 }
486
487 rc = platform_driver_register(&msm_cpufreq_plat_driver);
488 if (rc < 0) {
489 /* Unblock hotplug if msm-cpufreq probe fails */
Stephen Boyd6208a582017-03-02 15:09:13 -0800490 cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
491 cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
Stephen Boydce5a5782014-08-01 15:39:54 -0700492 for_each_possible_cpu(cpu)
493 mutex_destroy(&(per_cpu(suspend_data, cpu).
494 suspend_mutex));
495 return rc;
496 }
497
Stephen Boyd072b57e2017-03-22 18:12:45 -0700498 return 0;
Stephen Boydce5a5782014-08-01 15:39:54 -0700499}
500
501subsys_initcall(msm_cpufreq_register);
502
503static int __init msm_cpufreq_early_register(void)
504{
Stephen Boyd6208a582017-03-02 15:09:13 -0800505 int ret;
506
507 ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
508 "AP_QCOM_CPUFREQ_STARTING",
509 qcom_cpufreq_starting_cpu,
510 qcom_cpufreq_dying_cpu);
511 if (ret)
512 return ret;
513
514 ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
515 "QCOM_CPUFREQ_PREPARE",
516 qcom_cpufreq_up_cpu,
517 qcom_cpufreq_dead_cpu);
518 if (!ret)
519 return ret;
520 cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
521 return ret;
Stephen Boydce5a5782014-08-01 15:39:54 -0700522}
523core_initcall(msm_cpufreq_early_register);