/* drivers/cpufreq/qcom-cpufreq.c
 *
 * MSM architecture cpufreq driver
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
 * Author: Mike A. Chan <mikechan@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/suspend.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpu_cooling.h>
#include <trace/events/power.h>

static DEFINE_MUTEX(l2bw_lock);

static struct thermal_cooling_device *cdev[NR_CPUS];
static struct clk *cpu_clk[NR_CPUS];
static struct clk *l2_clk;
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
static bool hotplug_ready;

struct cpufreq_suspend_t {
	struct mutex suspend_mutex;
	int device_suspended;
};

static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
static DEFINE_PER_CPU(int, cached_resolve_idx);
static DEFINE_PER_CPU(unsigned int, cached_resolve_freq);

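/*
 * Switch the CPU clock to new_freq (in kHz), wrapping the change in
 * cpufreq transition notifications. The clock framework works in Hz, so
 * the rate is scaled and rounded through clk_round_rate() before it is
 * applied.
 */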
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
			unsigned int index)
{
	int ret = 0;
	struct cpufreq_freqs freqs;
	unsigned long rate;

	freqs.old = policy->cur;
	freqs.new = new_freq;
	freqs.cpu = policy->cpu;

	trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
	cpufreq_freq_transition_begin(policy, &freqs);

	rate = new_freq * 1000;
	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
	cpufreq_freq_transition_end(policy, &freqs, ret);
	if (!ret)
		trace_cpu_frequency_switch_end(policy->cpu);

	return ret;
}

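/*
 * cpufreq ->target callback. Resolves target_freq to a table index,
 * reusing the index cached by msm_cpufreq_resolve_freq() when it matches,
 * and applies it via set_cpu_freq(). Requests are rejected while the CPU
 * is marked as suspended.
 */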
static int msm_cpufreq_target(struct cpufreq_policy *policy,
			      unsigned int target_freq,
			      unsigned int relation)
{
	int ret = 0;
	int index;
	struct cpufreq_frequency_table *table;
	int first_cpu = cpumask_first(policy->related_cpus);

	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);

	if (target_freq == policy->cur)
		goto done;

	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
			 policy->cpu);
		ret = -EFAULT;
		goto done;
	}

	table = policy->freq_table;
	if (per_cpu(cached_resolve_freq, first_cpu) == target_freq)
		index = per_cpu(cached_resolve_idx, first_cpu);
	else
		index = cpufreq_frequency_table_target(policy, target_freq,
						       relation);

	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		 policy->cpu, target_freq, relation,
		 policy->min, policy->max, table[index].frequency);

	ret = set_cpu_freq(policy, table[index].frequency,
			   table[index].driver_data);
done:
	mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
	return ret;
}

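/*
 * cpufreq ->resolve_freq callback. Maps target_freq onto a frequency table
 * entry and caches both the index and the resolved frequency for the clock
 * domain's first CPU so the following ->target call can skip the lookup.
 */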
static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	int index;
	int first_cpu = cpumask_first(policy->related_cpus);
	unsigned int freq;

	index = cpufreq_frequency_table_target(policy, target_freq,
					       CPUFREQ_RELATION_L);
	freq = policy->freq_table[index].frequency;

	per_cpu(cached_resolve_idx, first_cpu) = index;
	per_cpu(cached_resolve_freq, first_cpu) = freq;

	return freq;
}

static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
	return clk_get_rate(cpu_clk[cpu]) / 1000;
}

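/*
 * cpufreq ->init callback. Groups all CPUs that share a clock under one
 * policy, validates the frequency table and programs an initial frequency
 * so the policy limits take effect as soon as the CPU comes online.
 */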
static int msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	int ret = 0;
	struct cpufreq_frequency_table *table =
			per_cpu(freq_table, policy->cpu);
	int cpu;

	/*
	 * In some SoCs, some cores are clocked by the same source and their
	 * frequencies cannot be changed independently. Find all other CPUs
	 * that share the same clock, and mark them as controlled by the
	 * same policy.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
			cpumask_set_cpu(cpu, policy->cpus);

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("cpufreq: failed to get policy min/max\n");
		return ret;
	}

	cur_freq = clk_get_rate(cpu_clk[policy->cpu]) / 1000;

	index = cpufreq_frequency_table_target(policy, cur_freq,
					       CPUFREQ_RELATION_H);
	/*
	 * Call set_cpu_freq unconditionally so that when the cpu is set to
	 * online, the frequency limit will always be updated.
	 */
	ret = set_cpu_freq(policy, table[index].frequency,
			   table[index].driver_data);
	if (ret)
		return ret;
	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
		 policy->cpu, cur_freq, table[index].frequency);
	policy->cur = table[index].frequency;

	return 0;
}

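/*
 * CPU hotplug callbacks: the up/dead pair prepares and unprepares the CPU
 * and L2 clocks, while the starting/dying pair enables and disables them.
 * All of them fail with -EINVAL until probe has looked up the clocks.
 */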
static int qcom_cpufreq_dead_cpu(unsigned int cpu)
{
	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;

	clk_unprepare(cpu_clk[cpu]);
	clk_unprepare(l2_clk);
	return 0;
}

static int qcom_cpufreq_up_cpu(unsigned int cpu)
{
	int rc;

	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;

	rc = clk_prepare(l2_clk);
	if (rc < 0)
		return rc;
	rc = clk_prepare(cpu_clk[cpu]);
	if (rc < 0)
		clk_unprepare(l2_clk);
	return rc;
}

static int qcom_cpufreq_dying_cpu(unsigned int cpu)
{
	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;

	clk_disable(cpu_clk[cpu]);
	clk_disable(l2_clk);
	return 0;
}

static int qcom_cpufreq_starting_cpu(unsigned int cpu)
{
	int rc;

	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;

	rc = clk_enable(l2_clk);
	if (rc < 0)
		return rc;
	rc = clk_enable(cpu_clk[cpu]);
	if (rc < 0)
		clk_disable(l2_clk);
	return rc;
}

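/*
 * PM notifier helpers: block frequency requests while the system is
 * suspending, and on resume re-validate any policy whose current frequency
 * ended up outside its min/max limits.
 */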
static int msm_cpufreq_suspend(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
		per_cpu(suspend_data, cpu).device_suspended = 1;
		mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
	}

	return NOTIFY_DONE;
}

static int msm_cpufreq_resume(void)
{
	int cpu, ret;
	struct cpufreq_policy policy;

	for_each_possible_cpu(cpu) {
		per_cpu(suspend_data, cpu).device_suspended = 0;
	}

	/*
	 * Frequency requests might be rejected during suspend, resulting
	 * in policy->cur violating the min/max constraints.
	 * Correct the frequency as soon as possible.
	 */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		ret = cpufreq_get_policy(&policy, cpu);
		if (ret)
			continue;
		if (policy.cur <= policy.max && policy.cur >= policy.min)
			continue;
		ret = cpufreq_update_policy(cpu);
		if (ret)
			pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
				cpu);
		else
			pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
				cpu);
	}
	put_online_cpus();

	return NOTIFY_DONE;
}

static int msm_cpufreq_pm_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	switch (event) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		return msm_cpufreq_resume();
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		return msm_cpufreq_suspend();
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block msm_cpufreq_pm_notifier = {
	.notifier_call = msm_cpufreq_pm_event,
};

static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

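/*
 * cpufreq ->ready callback. When the CPU's DT node has #cooling-cells,
 * register a cpufreq cooling device for every CPU in the policy; if the
 * node points to a qcom,lmh-dcvs phandle, registration is skipped.
 */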
static void msm_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct device_node *np, *lmh_node;
	unsigned int cpu = 0;

	if (cdev[policy->cpu])
		return;

	np = of_cpu_device_node_get(policy->cpu);
	if (WARN_ON(!np))
		return;

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		lmh_node = of_parse_phandle(np, "qcom,lmh-dcvs", 0);
		if (lmh_node) {
			of_node_put(lmh_node);
			goto ready_exit;
		}

		for_each_cpu(cpu, policy->related_cpus) {
			cpumask_t cpu_mask = CPU_MASK_NONE;

			of_node_put(np);
			np = of_cpu_device_node_get(cpu);
			if (WARN_ON(!np))
				return;

			cpumask_set_cpu(cpu, &cpu_mask);
			cdev[cpu] = of_cpufreq_cooling_register(np, &cpu_mask);
			if (IS_ERR(cdev[cpu])) {
				pr_err("running cpufreq for CPU%d without cooling dev: %ld\n",
				       cpu, PTR_ERR(cdev[cpu]));
				cdev[cpu] = NULL;
			}
		}
	}

ready_exit:
	of_node_put(np);
}

static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
		 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.resolve_freq = msm_cpufreq_resolve_freq,
	.get = msm_cpufreq_get_freq,
	.name = "msm",
	.attr = msm_freq_attr,
	.ready = msm_cpufreq_ready,
};

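/*
 * Build a cpufreq frequency table from the DT property tbl_name. Each
 * requested frequency is rounded through the CPU clock, and entries that
 * round to a rate no higher than the previous one are dropped.
 */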
static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
						char *tbl_name, int cpu)
{
	int ret, nf, i, j;
	u32 *data;
	struct cpufreq_frequency_table *ftbl;

	/* Parse list of usable CPU frequencies. */
	if (!of_find_property(dev->of_node, tbl_name, &nf))
		return ERR_PTR(-EINVAL);
	nf /= sizeof(*data);

	if (nf == 0)
		return ERR_PTR(-EINVAL);

	data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
	if (ret)
		return ERR_PTR(ret);

	ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
	if (!ftbl)
		return ERR_PTR(-ENOMEM);

	j = 0;
	for (i = 0; i < nf; i++) {
		unsigned long f;

		f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
		if (IS_ERR_VALUE(f))
			break;
		f /= 1000;

		/*
		 * Don't repeat frequencies if they round up to the same
		 * clock frequency.
		 */
		if (j > 0 && f <= ftbl[j - 1].frequency)
			continue;

		ftbl[j].driver_data = j;
		ftbl[j].frequency = f;
		j++;
	}

	ftbl[j].driver_data = j;
	ftbl[j].frequency = CPUFREQ_TABLE_END;

	devm_kfree(dev, data);

	return ftbl;
}

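/*
 * Platform probe: look up the per-CPU and L2 clocks, parse either a common
 * frequency table or per-CPU tables from DT, then register the PM notifier
 * and the cpufreq driver.
 */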
static int msm_cpufreq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	char clk_name[] = "cpu??_clk";
	char tbl_name[] = "qcom,cpufreq-table-??";
	struct clk *c;
	int cpu, ret;
	struct cpufreq_frequency_table *ftbl;

	l2_clk = devm_clk_get(dev, "l2_clk");
	if (IS_ERR(l2_clk))
		l2_clk = NULL;

	for_each_possible_cpu(cpu) {
		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
		c = devm_clk_get(dev, clk_name);
		if (cpu == 0 && IS_ERR(c))
			return PTR_ERR(c);
		else if (IS_ERR(c))
			c = cpu_clk[cpu - 1];
		cpu_clk[cpu] = c;
	}
	hotplug_ready = true;

	/* Use per-policy governor tunables for some targets */
	if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
		msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

	/* Parse the common cpufreq table for all CPUs */
	ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
	if (!IS_ERR(ftbl)) {
		for_each_possible_cpu(cpu)
			per_cpu(freq_table, cpu) = ftbl;
		goto out_register;
	}

	/*
	 * No common table. Parse individual tables for each unique
	 * CPU clock.
	 */
	for_each_possible_cpu(cpu) {
		snprintf(tbl_name, sizeof(tbl_name),
			 "qcom,cpufreq-table-%d", cpu);
		ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);

		/* CPU0 must have a freq table */
		if (cpu == 0 && IS_ERR(ftbl)) {
			dev_err(dev, "Failed to parse CPU0's freq table\n");
			return PTR_ERR(ftbl);
		}
		if (cpu == 0) {
			per_cpu(freq_table, cpu) = ftbl;
			continue;
		}

		if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
			dev_err(dev, "Failed to parse CPU%d's freq table\n",
				cpu);
			return PTR_ERR(ftbl);
		}

		/* Use the previous CPU's table if it shares the same clock */
		if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
			if (!IS_ERR(ftbl)) {
				dev_warn(dev, "Conflicting tables for CPU%d\n",
					 cpu);
				devm_kfree(dev, ftbl);
			}
			ftbl = per_cpu(freq_table, cpu - 1);
		}
		per_cpu(freq_table, cpu) = ftbl;
	}

out_register:
	ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&msm_cpufreq_driver);
	if (ret)
		unregister_pm_notifier(&msm_cpufreq_pm_notifier);

	return ret;
}

static const struct of_device_id msm_cpufreq_match_table[] = {
	{ .compatible = "qcom,msm-cpufreq" },
	{}
};

static struct platform_driver msm_cpufreq_plat_driver = {
	.probe = msm_cpufreq_probe,
	.driver = {
		.name = "msm-cpufreq",
		.of_match_table = msm_cpufreq_match_table,
	},
};

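/*
 * Late init: set up the per-CPU suspend bookkeeping and register the
 * platform driver. If registration fails, the hotplug states installed by
 * the early init are removed so CPU hotplug is not blocked.
 */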
static int __init msm_cpufreq_register(void)
{
	int cpu, rc;

	for_each_possible_cpu(cpu) {
		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
		per_cpu(suspend_data, cpu).device_suspended = 0;
		per_cpu(cached_resolve_freq, cpu) = UINT_MAX;
	}

	rc = platform_driver_register(&msm_cpufreq_plat_driver);
	if (rc < 0) {
		/* Unblock hotplug if msm-cpufreq probe fails */
		cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
		cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
		for_each_possible_cpu(cpu)
			mutex_destroy(&(per_cpu(suspend_data, cpu).
					suspend_mutex));
		return rc;
	}

	return 0;
}

subsys_initcall(msm_cpufreq_register);

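/*
 * Early init: install the CPU hotplug states so the clock prepare/enable
 * callbacks are in place before secondary CPUs are brought online.
 */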
static int __init msm_cpufreq_early_register(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
					"AP_QCOM_CPUFREQ_STARTING",
					qcom_cpufreq_starting_cpu,
					qcom_cpufreq_dying_cpu);
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
					"QCOM_CPUFREQ_PREPARE",
					qcom_cpufreq_up_cpu,
					qcom_cpufreq_dead_cpu);
	if (!ret)
		return ret;
	cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
	return ret;
}
core_initcall(msm_cpufreq_early_register);