blob: f968ffd9f6afc8969c43f16304dddbd2f3e42ebf [file] [log] [blame]
Junjie Wub8be3332015-02-03 14:31:09 -08001/* drivers/cpufreq/qcom-cpufreq.c
Stephen Boydce5a5782014-08-01 15:39:54 -07002 *
3 * MSM architecture cpufreq driver
4 *
5 * Copyright (C) 2007 Google, Inc.
Santosh Mardiefe63672017-01-12 16:48:53 +05306 * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
Stephen Boydce5a5782014-08-01 15:39:54 -07007 * Author: Mike A. Chan <mikechan@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/cpufreq.h>
23#include <linux/cpu.h>
24#include <linux/cpumask.h>
Stephen Boydce5a5782014-08-01 15:39:54 -070025#include <linux/suspend.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/platform_device.h>
29#include <linux/of.h>
30#include <trace/events/power.h>
31
/*
 * Serializes L2/bus bandwidth updates.  Not referenced in this chunk of the
 * file -- NOTE(review): confirm it still has users before removing.
 */
static DEFINE_MUTEX(l2bw_lock);

/* Per-CPU clock handles, indexed by CPU number; populated at probe time. */
static struct clk *cpu_clk[NR_CPUS];
/* Shared L2 clock; NULL when the DT provides no "l2_clk" (see probe). */
static struct clk *l2_clk;
/* Per-CPU frequency table parsed from DT; CPUs sharing a clock share one. */
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
/* Set once probe has acquired all CPU clocks; the CPU hotplug callbacks
 * return -EINVAL until then. */
static bool hotplug_ready;

/*
 * Per-CPU suspend bookkeeping.  suspend_mutex serializes frequency changes
 * against the PM notifier; device_suspended rejects frequency changes while
 * the system is suspending/hibernating.
 */
struct cpufreq_suspend_t {
        struct mutex suspend_mutex;
        int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
45
46static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
47 unsigned int index)
48{
49 int ret = 0;
Stephen Boydce5a5782014-08-01 15:39:54 -070050 struct cpufreq_freqs freqs;
Stephen Boydce5a5782014-08-01 15:39:54 -070051 unsigned long rate;
52
53 freqs.old = policy->cur;
54 freqs.new = new_freq;
55 freqs.cpu = policy->cpu;
56
Stephen Boydce5a5782014-08-01 15:39:54 -070057 trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
58 cpufreq_freq_transition_begin(policy, &freqs);
59
60 rate = new_freq * 1000;
61 rate = clk_round_rate(cpu_clk[policy->cpu], rate);
62 ret = clk_set_rate(cpu_clk[policy->cpu], rate);
63 cpufreq_freq_transition_end(policy, &freqs, ret);
64 if (!ret)
65 trace_cpu_frequency_switch_end(policy->cpu);
66
Stephen Boydce5a5782014-08-01 15:39:54 -070067 return ret;
68}
69
/*
 * cpufreq ->target() hook: select a frequency-table entry satisfying
 * @target_freq/@relation and switch @policy->cpu to it.
 *
 * Runs under the CPU's suspend_mutex so it cannot race the PM notifier's
 * suspend/resume flagging.
 *
 * Returns 0 on success (or when already at @target_freq), -EFAULT when a
 * change is requested while the CPU is flagged suspended (the request is
 * rejected, not deferred, despite the "scheduling" wording in the message),
 * -ENODEV when the policy has no frequency table, or set_cpu_freq()'s error.
 */
static int msm_cpufreq_target(struct cpufreq_policy *policy,
                              unsigned int target_freq,
                              unsigned int relation)
{
        int ret = 0;
        int index;
        struct cpufreq_frequency_table *table;

        mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);

        /* Nothing to do if we are already at the requested frequency. */
        if (target_freq == policy->cur)
                goto done;

        /* Reject frequency changes while suspending. */
        if (per_cpu(suspend_data, policy->cpu).device_suspended) {
                pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
                         policy->cpu);
                ret = -EFAULT;
                goto done;
        }

        table = policy->freq_table;
        if (!table) {
                pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
                       policy->cpu);
                ret = -ENODEV;
                goto done;
        }
        index = cpufreq_frequency_table_target(policy, target_freq, relation);

        pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
                 policy->cpu, target_freq, relation,
                 policy->min, policy->max, table[index].frequency);

        ret = set_cpu_freq(policy, table[index].frequency,
                           table[index].driver_data);
done:
        mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
        return ret;
}
109
110static int msm_cpufreq_verify(struct cpufreq_policy *policy)
111{
112 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
113 policy->cpuinfo.max_freq);
114 return 0;
115}
116
117static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
118{
119 return clk_get_rate(cpu_clk[cpu]) / 1000;
120}
121
/*
 * cpufreq ->init() hook: set up the policy for a (cluster of) CPU(s).
 *
 * Populates policy->cpus with every CPU sharing this CPU's clock, installs
 * the DT-derived frequency table, then snaps the CPU onto the highest table
 * frequency not above its current clock rate.
 *
 * Returns 0 on success or a negative error from table validation or the
 * initial frequency switch.
 */
static int msm_cpufreq_init(struct cpufreq_policy *policy)
{
        int cur_freq;
        int index;
        int ret = 0;
        struct cpufreq_frequency_table *table =
                        per_cpu(freq_table, policy->cpu);
        int cpu;

        /*
         * In some SoC, some cores are clocked by same source, and their
         * frequencies can not be changed independently. Find all other
         * CPUs that share same clock, and mark them as controlled by
         * same policy.
         */
        for_each_possible_cpu(cpu)
                if (cpu_clk[cpu] == cpu_clk[policy->cpu])
                        cpumask_set_cpu(cpu, policy->cpus);

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("cpufreq: failed to get policy min/max\n");
                return ret;
        }

        /* Current hardware rate in kHz, used to pick the initial entry. */
        cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;

        /* Highest table frequency at or below the current rate. */
        index = cpufreq_frequency_table_target(policy, cur_freq,
                                               CPUFREQ_RELATION_H);
        /*
         * Call set_cpu_freq unconditionally so that when cpu is set to
         * online, frequency limit will always be updated.
         */
        ret = set_cpu_freq(policy, table[index].frequency,
                           table[index].driver_data);
        if (ret)
                return ret;
        pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
                 policy->cpu, cur_freq, table[index].frequency);
        policy->cur = table[index].frequency;

        return 0;
}
165
Stephen Boyd6208a582017-03-02 15:09:13 -0800166static int qcom_cpufreq_dead_cpu(unsigned int cpu)
Stephen Boydce5a5782014-08-01 15:39:54 -0700167{
Stephen Boyd6208a582017-03-02 15:09:13 -0800168 /* Fail hotplug until this driver can get CPU clocks */
169 if (!hotplug_ready)
170 return -EINVAL;
171
172 clk_unprepare(cpu_clk[cpu]);
173 clk_unprepare(l2_clk);
174 return 0;
175}
176
177static int qcom_cpufreq_up_cpu(unsigned int cpu)
178{
Stephen Boydce5a5782014-08-01 15:39:54 -0700179 int rc;
180
181 /* Fail hotplug until this driver can get CPU clocks */
182 if (!hotplug_ready)
Stephen Boyd6208a582017-03-02 15:09:13 -0800183 return -EINVAL;
Stephen Boydce5a5782014-08-01 15:39:54 -0700184
Stephen Boyd6208a582017-03-02 15:09:13 -0800185 rc = clk_prepare(l2_clk);
186 if (rc < 0)
187 return rc;
188 rc = clk_prepare(cpu_clk[cpu]);
189 if (rc < 0)
Stephen Boydce5a5782014-08-01 15:39:54 -0700190 clk_unprepare(l2_clk);
Stephen Boyd6208a582017-03-02 15:09:13 -0800191 return rc;
Stephen Boydce5a5782014-08-01 15:39:54 -0700192}
193
Stephen Boyd6208a582017-03-02 15:09:13 -0800194static int qcom_cpufreq_dying_cpu(unsigned int cpu)
195{
196 /* Fail hotplug until this driver can get CPU clocks */
197 if (!hotplug_ready)
198 return -EINVAL;
199
200 clk_disable(cpu_clk[cpu]);
201 clk_disable(l2_clk);
202 return 0;
203}
204
205static int qcom_cpufreq_starting_cpu(unsigned int cpu)
206{
207 int rc;
208
209 /* Fail hotplug until this driver can get CPU clocks */
210 if (!hotplug_ready)
211 return -EINVAL;
212
213 rc = clk_enable(l2_clk);
214 if (rc < 0)
215 return rc;
216 rc = clk_enable(cpu_clk[cpu]);
217 if (rc < 0)
218 clk_disable(l2_clk);
219 return rc;
220}
Stephen Boydce5a5782014-08-01 15:39:54 -0700221
222static int msm_cpufreq_suspend(void)
223{
224 int cpu;
225
226 for_each_possible_cpu(cpu) {
227 mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
228 per_cpu(suspend_data, cpu).device_suspended = 1;
229 mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
230 }
231
232 return NOTIFY_DONE;
233}
234
235static int msm_cpufreq_resume(void)
236{
Junjie Wud5261b02014-12-01 21:21:00 -0800237 int cpu, ret;
238 struct cpufreq_policy policy;
Stephen Boydce5a5782014-08-01 15:39:54 -0700239
240 for_each_possible_cpu(cpu) {
241 per_cpu(suspend_data, cpu).device_suspended = 0;
242 }
243
Junjie Wud5261b02014-12-01 21:21:00 -0800244 /*
245 * Freq request might be rejected during suspend, resulting
246 * in policy->cur violating min/max constraint.
247 * Correct the frequency as soon as possible.
248 */
249 get_online_cpus();
250 for_each_online_cpu(cpu) {
251 ret = cpufreq_get_policy(&policy, cpu);
252 if (ret)
253 continue;
254 if (policy.cur <= policy.max && policy.cur >= policy.min)
255 continue;
256 ret = cpufreq_update_policy(cpu);
257 if (ret)
258 pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
259 cpu);
260 else
261 pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
262 cpu);
263 }
264 put_online_cpus();
265
Stephen Boydce5a5782014-08-01 15:39:54 -0700266 return NOTIFY_DONE;
267}
268
269static int msm_cpufreq_pm_event(struct notifier_block *this,
270 unsigned long event, void *ptr)
271{
272 switch (event) {
273 case PM_POST_HIBERNATION:
274 case PM_POST_SUSPEND:
275 return msm_cpufreq_resume();
276 case PM_HIBERNATION_PREPARE:
277 case PM_SUSPEND_PREPARE:
278 return msm_cpufreq_suspend();
279 default:
280 return NOTIFY_DONE;
281 }
282}
283
/* Hooks this driver into suspend/hibernate transitions (see
 * msm_cpufreq_pm_event()); registered in msm_cpufreq_probe(). */
static struct notifier_block msm_cpufreq_pm_notifier = {
        .notifier_call = msm_cpufreq_pm_event,
};

/* sysfs attributes: expose scaling_available_frequencies per policy. */
static struct freq_attr *msm_freq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

/* cpufreq driver ops.  CPUFREQ_HAVE_GOVERNOR_PER_POLICY may be OR'd into
 * .flags at probe time based on the "qcom,governor-per-policy" DT bool. */
static struct cpufreq_driver msm_cpufreq_driver = {
        /* lps calculations are handled here. */
        .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
                 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .init = msm_cpufreq_init,
        .verify = msm_cpufreq_verify,
        .target = msm_cpufreq_target,
        .get = msm_cpufreq_get_freq,
        .name = "msm",
        .attr = msm_freq_attr,
};
303
304static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
305 char *tbl_name, int cpu)
306{
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700307 int ret, nf, i, j;
Stephen Boydce5a5782014-08-01 15:39:54 -0700308 u32 *data;
309 struct cpufreq_frequency_table *ftbl;
310
311 /* Parse list of usable CPU frequencies. */
312 if (!of_find_property(dev->of_node, tbl_name, &nf))
313 return ERR_PTR(-EINVAL);
314 nf /= sizeof(*data);
315
316 if (nf == 0)
317 return ERR_PTR(-EINVAL);
318
319 data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
320 if (!data)
321 return ERR_PTR(-ENOMEM);
322
323 ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
324 if (ret)
325 return ERR_PTR(ret);
326
327 ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
328 if (!ftbl)
329 return ERR_PTR(-ENOMEM);
330
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700331 j = 0;
Stephen Boydce5a5782014-08-01 15:39:54 -0700332 for (i = 0; i < nf; i++) {
333 unsigned long f;
334
335 f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
336 if (IS_ERR_VALUE(f))
337 break;
338 f /= 1000;
339
340 /*
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700341 * Don't repeat frequencies if they round up to the same clock
342 * frequency.
Stephen Boydce5a5782014-08-01 15:39:54 -0700343 *
Stephen Boydce5a5782014-08-01 15:39:54 -0700344 */
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700345 if (j > 0 && f <= ftbl[j - 1].frequency)
346 continue;
Stephen Boydce5a5782014-08-01 15:39:54 -0700347
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700348 ftbl[j].driver_data = j;
349 ftbl[j].frequency = f;
350 j++;
Stephen Boydce5a5782014-08-01 15:39:54 -0700351 }
352
Rohit Gupta13cd56e2016-08-22 16:50:59 -0700353 ftbl[j].driver_data = j;
354 ftbl[j].frequency = CPUFREQ_TABLE_END;
Stephen Boydce5a5782014-08-01 15:39:54 -0700355
356 devm_kfree(dev, data);
357
358 return ftbl;
359}
360
361static int msm_cpufreq_probe(struct platform_device *pdev)
362{
363 struct device *dev = &pdev->dev;
364 char clk_name[] = "cpu??_clk";
365 char tbl_name[] = "qcom,cpufreq-table-??";
366 struct clk *c;
Stephen Boyd072b57e2017-03-22 18:12:45 -0700367 int cpu, ret;
Stephen Boydce5a5782014-08-01 15:39:54 -0700368 struct cpufreq_frequency_table *ftbl;
369
370 l2_clk = devm_clk_get(dev, "l2_clk");
371 if (IS_ERR(l2_clk))
372 l2_clk = NULL;
373
374 for_each_possible_cpu(cpu) {
375 snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
376 c = devm_clk_get(dev, clk_name);
Santosh Mardiefe63672017-01-12 16:48:53 +0530377 if (cpu == 0 && IS_ERR(c))
Stephen Boydce5a5782014-08-01 15:39:54 -0700378 return PTR_ERR(c);
Santosh Mardiefe63672017-01-12 16:48:53 +0530379 else if (IS_ERR(c))
380 c = cpu_clk[cpu-1];
Stephen Boydce5a5782014-08-01 15:39:54 -0700381 cpu_clk[cpu] = c;
382 }
383 hotplug_ready = true;
384
385 /* Use per-policy governor tunable for some targets */
386 if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
387 msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
388
389 /* Parse commong cpufreq table for all CPUs */
390 ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
391 if (!IS_ERR(ftbl)) {
392 for_each_possible_cpu(cpu)
393 per_cpu(freq_table, cpu) = ftbl;
394 return 0;
395 }
396
397 /*
398 * No common table. Parse individual tables for each unique
399 * CPU clock.
400 */
401 for_each_possible_cpu(cpu) {
402 snprintf(tbl_name, sizeof(tbl_name),
403 "qcom,cpufreq-table-%d", cpu);
404 ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
405
406 /* CPU0 must contain freq table */
407 if (cpu == 0 && IS_ERR(ftbl)) {
408 dev_err(dev, "Failed to parse CPU0's freq table\n");
409 return PTR_ERR(ftbl);
410 }
411 if (cpu == 0) {
412 per_cpu(freq_table, cpu) = ftbl;
413 continue;
414 }
415
416 if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
417 dev_err(dev, "Failed to parse CPU%d's freq table\n",
418 cpu);
419 return PTR_ERR(ftbl);
420 }
421
422 /* Use previous CPU's table if it shares same clock */
423 if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
424 if (!IS_ERR(ftbl)) {
425 dev_warn(dev, "Conflicting tables for CPU%d\n",
426 cpu);
427 devm_kfree(dev, ftbl);
428 }
429 ftbl = per_cpu(freq_table, cpu - 1);
430 }
431 per_cpu(freq_table, cpu) = ftbl;
432 }
433
Stephen Boyd072b57e2017-03-22 18:12:45 -0700434 ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
435 if (ret)
436 return ret;
437
438 ret = cpufreq_register_driver(&msm_cpufreq_driver);
439 if (ret)
440 unregister_pm_notifier(&msm_cpufreq_pm_notifier);
441
442 return ret;
Stephen Boydce5a5782014-08-01 15:39:54 -0700443}
444
/* DT match: binds this driver to the "qcom,msm-cpufreq" node that carries
 * the frequency tables parsed in msm_cpufreq_probe(). */
static const struct of_device_id msm_cpufreq_match_table[] = {
        { .compatible = "qcom,msm-cpufreq" },
        {}
};

static struct platform_driver msm_cpufreq_plat_driver = {
        .probe = msm_cpufreq_probe,
        .driver = {
                .name = "msm-cpufreq",
                .of_match_table = msm_cpufreq_match_table,
        },
};
457
/*
 * subsys_initcall: initialize per-CPU suspend state and register the
 * platform driver.  On failure the CPU hotplug states installed earlier by
 * msm_cpufreq_early_register() are removed, so hotplug is not permanently
 * blocked by the !hotplug_ready checks, and the mutexes are destroyed.
 */
static int __init msm_cpufreq_register(void)
{
        int cpu, rc;

        for_each_possible_cpu(cpu) {
                mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
                per_cpu(suspend_data, cpu).device_suspended = 0;
        }

        rc = platform_driver_register(&msm_cpufreq_plat_driver);
        if (rc < 0) {
                /* Unblock hotplug if msm-cpufreq probe fails */
                cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
                cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
                for_each_possible_cpu(cpu)
                        mutex_destroy(&(per_cpu(suspend_data, cpu).
                                        suspend_mutex));
                return rc;
        }

        return 0;
}

subsys_initcall(msm_cpufreq_register);
482
483static int __init msm_cpufreq_early_register(void)
484{
Stephen Boyd6208a582017-03-02 15:09:13 -0800485 int ret;
486
487 ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
488 "AP_QCOM_CPUFREQ_STARTING",
489 qcom_cpufreq_starting_cpu,
490 qcom_cpufreq_dying_cpu);
491 if (ret)
492 return ret;
493
494 ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
495 "QCOM_CPUFREQ_PREPARE",
496 qcom_cpufreq_up_cpu,
497 qcom_cpufreq_dead_cpu);
498 if (!ret)
499 return ret;
500 cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
501 return ret;
Stephen Boydce5a5782014-08-01 15:39:54 -0700502}
503core_initcall(msm_cpufreq_early_register);