/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/smp.h>
#include <linux/tick.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/msm-bus.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm-legacy.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/scm-boot.h>
#include <asm/suspend.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#ifdef CONFIG_VFP
#include <asm/vfp.h>
#endif
#include <soc/qcom/jtag.h>
#include "pm-boot.h"
#include "idle.h"

#define SCM_CMD_TERMINATE_PC (0x2)
#define SCM_CMD_CORE_HOTPLUGGED (0x10)
#define SCM_FLUSH_FLAG_MASK (0x3)

#define SCLK_HZ (32768)

#define MAX_BUF_SIZE 1024

static int msm_pm_debug_mask = 1;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, 0664
);

enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	MSM_PM_DEBUG_IDLE = BIT(5),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(6),
	MSM_PM_DEBUG_HOTPLUG = BIT(7),
};

enum msm_pc_count_offsets {
	MSM_PC_ENTRY_COUNTER,
	MSM_PC_EXIT_COUNTER,
	MSM_PC_FALLTHRU_COUNTER,
	MSM_PC_UNUSED,
	MSM_PC_NUM_COUNTERS,
};

static bool msm_pm_ldo_retention_enabled = true;
static bool msm_pm_tz_flushes_cache;
static bool msm_pm_ret_no_pll_switch;
static bool msm_no_ramp_down_pc;
static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
static DEFINE_PER_CPU(struct clk *, cpu_clks);
static struct clk *l2_clk;

static long *msm_pc_debug_counters;
/* Bus address of the counter buffer, published to IMEM at probe time */
static dma_addr_t msm_pc_debug_counters_phys;

static cpumask_t retention_cpus;
static DEFINE_SPINLOCK(retention_lock);
static DEFINE_MUTEX(msm_pc_debug_mutex);

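/*
 * Query whether the current CPU's L1 data cache is write-back.
 * CSSELR is set to 0 to select the level 1 data cache, and bit 30
 * of the resulting CCSIDR value is the write-back attribute bit.
 */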
static bool msm_pm_is_L1_writeback(void)
{
	u32 cache_id = 0;

#if defined(CONFIG_CPU_V7)
	u32 sel = 0;

	asm volatile ("mcr p15, 2, %[ccselr], c0, c0, 0\n\t"
		      "isb\n\t"
		      "mrc p15, 1, %[ccsidr], c0, c0, 0\n\t"
		      :[ccsidr]"=r" (cache_id)
		      :[ccselr]"r" (sel)
		     );
	return cache_id & BIT(30);
#elif defined(CONFIG_ARM64)
	u32 sel = 0;

	asm volatile("msr csselr_el1, %[ccselr]\n\t"
		     "isb\n\t"
		     "mrs %[ccsidr], ccsidr_el1\n\t"
		     :[ccsidr]"=r" (cache_id)
		     :[ccselr]"r" (sel)
		    );
	return cache_id & BIT(30);
#else
#error No valid CPU arch selected
#endif
}

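/* Shallowest low power mode: architectural wait-for-interrupt only. */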
static bool msm_pm_swfi(bool from_idle)
{
	msm_arch_idle();
	return true;
}

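/*
 * Enter LDO retention on the calling CPU. The CPU is added to
 * retention_cpus so that msm_pm_enable_retention(false) can kick it
 * out of retention, the CPU clock is disabled unless
 * qcom,no-pll-switch-for-retention is set, and the SPM is programmed
 * for MSM_SPM_MODE_RETENTION around the WFI.
 */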
static bool msm_pm_retention(bool from_idle)
{
	int ret = 0;
	unsigned int cpu = smp_processor_id();
	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);

	spin_lock(&retention_lock);

	if (!msm_pm_ldo_retention_enabled)
		goto bailout;

	cpumask_set_cpu(cpu, &retention_cpus);
	spin_unlock(&retention_lock);

	if (!msm_pm_ret_no_pll_switch)
		clk_disable(cpu_clk);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_RETENTION, false);
	WARN_ON(ret);

	msm_arch_idle();

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);

	if (!msm_pm_ret_no_pll_switch)
		if (clk_enable(cpu_clk))
			pr_err("%s(): Error restoring cpu clk\n", __func__);

	spin_lock(&retention_lock);
	cpumask_clear_cpu(cpu, &retention_cpus);
bailout:
	spin_unlock(&retention_lock);
	return true;
}

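/*
 * Bump one of the per-CPU power-collapse debug counters in the DMA
 * buffer shared with the warm boot code. The slot is indexed by the
 * (cluster, cpu) pair derived from the MPIDR affinity fields.
 */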
static inline void msm_pc_inc_debug_count(uint32_t cpu,
		enum msm_pc_count_offsets offset)
{
	int cntr_offset;
	uint32_t cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);

	if (cluster_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER) {
		WARN_ON(cpu);
		/* Out-of-range index; do not write past the buffer */
		return;
	}

	cntr_offset = (cluster_id * MAX_CPUS_PER_CLUSTER * MSM_PC_NUM_COUNTERS)
			+ (cpu_id * MSM_PC_NUM_COUNTERS) + offset;

	if (!msm_pc_debug_counters)
		return;

	msm_pc_debug_counters[cntr_offset]++;
}

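/*
 * Terminate power collapse for a CPU going offline. The TZ terminate
 * call does not return on success; the fallthrough counter below only
 * advances if it fails.
 */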
static bool msm_pm_pc_hotplug(void)
{
	uint32_t cpu = smp_processor_id();
	enum msm_pm_l2_scm_flag flag;
	struct scm_desc desc;

	flag = lpm_cpu_pre_pc_cb(cpu);

	if (!msm_pm_tz_flushes_cache) {
		if (flag == MSM_SCM_L2_OFF)
			flush_cache_all();
		else if (msm_pm_is_L1_writeback())
			flush_cache_louis();
	}

	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);

	if (is_scm_armv8()) {
		desc.args[0] = SCM_CMD_CORE_HOTPLUGGED |
			       (flag & SCM_FLUSH_FLAG_MASK);
		desc.arginfo = SCM_ARGS(1);
		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
				 SCM_CMD_TERMINATE_PC), &desc);
	} else {
		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
			SCM_CMD_CORE_HOTPLUGGED | (flag & SCM_FLUSH_FLAG_MASK));
	}

	/* Should not return here */
	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
	return false;
}

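/*
 * Fast power collapse: program the SPM for FASTPC and idle. A CPU in
 * the idle path (or still online) can simply WFI; a CPU being
 * hotplugged must instead terminate through TZ.
 */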
static bool msm_pm_fastpc(bool from_idle)
{
	int ret = 0;
	unsigned int cpu = smp_processor_id();

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_FASTPC, false);
	WARN_ON(ret);

	if (from_idle || cpu_online(cpu))
		msm_arch_idle();
	else
		msm_pm_pc_hotplug();

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);

	return true;
}

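/**
 * msm_pm_collapse(): Enter power collapse on the current CPU
 * @unused: unused; required by the cpu_suspend() callback signature
 *
 * Flushes the caches as required, then terminates execution in TZ.
 * On a successful power collapse this function does not return; the
 * CPU resumes through the warm boot path instead.
 */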
int msm_pm_collapse(unsigned long unused)
{
	uint32_t cpu = smp_processor_id();
	enum msm_pm_l2_scm_flag flag;
	struct scm_desc desc;

	flag = lpm_cpu_pre_pc_cb(cpu);

	if (!msm_pm_tz_flushes_cache) {
		if (flag == MSM_SCM_L2_OFF)
			flush_cache_all();
		else if (msm_pm_is_L1_writeback())
			flush_cache_louis();
	}
	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);

	if (is_scm_armv8()) {
		desc.args[0] = flag;
		desc.arginfo = SCM_ARGS(1);
		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
				 SCM_CMD_TERMINATE_PC), &desc);
	} else {
		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
	}

	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);

	return 0;
}
EXPORT_SYMBOL(msm_pm_collapse);

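/*
 * Common SPM-driven power collapse. An entry that saves CPU registers
 * resumes via cpu_resume(); a hotplugged CPU re-enters through
 * msm_secondary_startup() instead. JTAG state is saved and restored
 * around the collapse.
 */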
static bool __ref msm_pm_spm_power_collapse(
	unsigned int cpu, int mode, bool from_idle, bool notify_rpm)
{
	void *entry;
	bool collapsed = false;
	int ret;
	bool save_cpu_regs = (cpu_online(cpu) || from_idle);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: notify_rpm %d\n",
			cpu, __func__, (int) notify_rpm);

	ret = msm_spm_set_low_power_mode(mode, notify_rpm);
	WARN_ON(ret);

	entry = save_cpu_regs ? cpu_resume : msm_secondary_startup;

	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));

	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
		pr_info("CPU%u: %s: program vector to %pK\n",
			cpu, __func__, entry);

	msm_jtag_save_state();

	collapsed = save_cpu_regs ?
		!cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();

	msm_jtag_restore_state();

	if (collapsed)
		local_fiq_enable();

	msm_pm_boot_config_after_pc(cpu);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
			cpu, __func__, collapsed);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	return collapsed;
}

static bool msm_pm_power_collapse_standalone(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	bool collapsed;

	collapsed = msm_pm_spm_power_collapse(cpu,
			MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
			from_idle, false);

	return collapsed;
}

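/*
 * Clock ramp helpers used around a full power collapse on the last
 * running CPU: the CPU and L2 clocks are gated on the way down and
 * re-enabled on the way back up.
 */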
static int ramp_down_last_cpu(int cpu)
{
	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
	int ret = 0;

	clk_disable(cpu_clk);
	clk_disable(l2_clk);

	return ret;
}

static int ramp_up_first_cpu(int cpu, int saved_rate)
{
	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
	int rc = 0;

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: restore clock rate\n",
			cpu, __func__);

	clk_enable(l2_clk);

	if (cpu_clk) {
		int ret = clk_enable(cpu_clk);

		if (ret) {
			pr_err("%s(): Error restoring cpu clk\n",
				__func__);
			return ret;
		}
	}

	return rc;
}

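/*
 * Full power collapse with RPM notification. An online CPU also ramps
 * its clocks down across the collapse unless msm_no_ramp_down_pc is
 * set.
 */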
static bool msm_pm_power_collapse(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned long saved_acpuclk_rate = 0;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			cpu, __func__, (int)from_idle);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);

	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
		saved_acpuclk_rate = ramp_down_last_cpu(cpu);

	collapsed = msm_pm_spm_power_collapse(cpu, MSM_SPM_MODE_POWER_COLLAPSE,
			from_idle, true);

	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
		ramp_up_first_cpu(cpu, saved_acpuclk_rate);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", cpu, __func__);
	return collapsed;
}

/******************************************************************************
 * External Idle/Suspend Functions
 *****************************************************************************/

static void arch_idle(void) {}

static bool (*execute[MSM_PM_SLEEP_MODE_NR])(bool idle) = {
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = msm_pm_swfi,
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		msm_pm_power_collapse_standalone,
	[MSM_PM_SLEEP_MODE_RETENTION] = msm_pm_retention,
	[MSM_PM_SLEEP_MODE_FASTPC] = msm_pm_fastpc,
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = msm_pm_power_collapse,
};

/**
 * msm_cpu_pm_enter_sleep(): Enter a low power mode on the current cpu
 *
 * @mode: sleep mode to enter
 * @from_idle: true if the mode is entered from the cpuidle path,
 *	       false if entered from suspend
 *
 * Returns the exit status reported by the sleep mode handler, e.g.
 * whether power collapse actually occurred.
 *
 * Must be called with interrupts disabled, on the core that is to
 * enter the low power mode.
 */
bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle)
{
	bool exit_stat = false;
	unsigned int cpu = smp_processor_id();

	if ((!from_idle && cpu_online(cpu))
			|| (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask))
		pr_info("CPU%u: %s: mode %d during %s\n", cpu, __func__,
			mode, from_idle ? "idle" : "suspend");

	if (execute[mode])
		exit_stat = execute[mode](from_idle);

	return exit_stat;
}

/**
 * msm_pm_wait_cpu_shutdown() - Wait for a core to be power collapsed
 * during hotplug
 *
 * @cpu: cpu to wait on.
 *
 * Blocking call that waits for the core to be power collapsed. It is
 * called from platform_cpu_die() to ensure that a core is power
 * collapsed before sending the CPU_DEAD notification, so that drivers
 * can remove their resource votes for this CPU (regulator and clock).
 */
int msm_pm_wait_cpu_shutdown(unsigned int cpu)
{
	int timeout = 0;

	if (!msm_pm_slp_sts)
		return 0;
	if (!msm_pm_slp_sts[cpu].base_addr)
		return 0;
	while (1) {
		/*
		 * Check for the SPM of the core being hotplugged to set
		 * its sleep state. The SPM sleep state indicates that the
		 * core has been power collapsed.
		 */
		int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);

		if (acc_sts & msm_pm_slp_sts[cpu].mask)
			return 0;

		udelay(100);
		/*
		 * Dump SPM registers for debugging.
		 */
		if (++timeout == 20) {
			msm_spm_dump_regs(cpu);
			__WARN_printf(
			"CPU%u didn't collapse in 2ms, sleep status: 0x%x\n",
				cpu, acc_sts);
		}
	}

	return -EBUSY;
}

static void msm_pm_ack_retention_disable(void *data)
{
	/*
	 * Deliberately empty: this IPI callback only confirms that the
	 * target core has woken up, after which it is safe to disable
	 * retention.
	 */
}

/**
 * msm_pm_enable_retention() - Enable/disable retention on all cores
 * @enable: true to enable retention, false to disable it
 */
void msm_pm_enable_retention(bool enable)
{
	if (enable == msm_pm_ldo_retention_enabled)
		return;

	msm_pm_ldo_retention_enabled = enable;

	/*
	 * If retention is being disabled, wake up all online cores to
	 * ensure that none of them is still executing retention.
	 * Offlined cores need not be woken up, as they enter the
	 * deepest sleep mode, namely RPM assisted power collapse.
	 */
	if (!enable) {
		preempt_disable();
		smp_call_function_many(&retention_cpus,
				msm_pm_ack_retention_disable,
				NULL, true);
		preempt_enable();
	}
}
EXPORT_SYMBOL(msm_pm_enable_retention);

/**
 * msm_pm_retention_enabled() - Check if retention is enabled
 *
 * Returns true if retention is enabled.
 */
bool msm_pm_retention_enabled(void)
{
	return msm_pm_ldo_retention_enabled;
}
EXPORT_SYMBOL(msm_pm_retention_enabled);

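/*
 * Register a system NOC bus-scale client and cast a vote (usecase
 * index 1 of the DT-provided table) on behalf of the PM driver.
 */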
static int msm_pm_snoc_client_probe(struct platform_device *pdev)
{
	int rc = 0;
	static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
	static uint32_t msm_pm_bus_client;

	msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);

	if (msm_pm_bus_pdata) {
		msm_pm_bus_client =
			msm_bus_scale_register_client(msm_pm_bus_pdata);

		if (!msm_pm_bus_client) {
			pr_err("%s: Failed to register SNOC client\n",
				__func__);
			rc = -ENXIO;
			goto snoc_cl_probe_done;
		}

		rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);

		if (rc)
			pr_err("%s: Error setting bus rate\n", __func__);
	}

snoc_cl_probe_done:
	return rc;
}

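/*
 * Parse the per-CPU qcom,sleep-status nodes and map the SPM sleep
 * status registers used by msm_pm_wait_cpu_shutdown().
 */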
static int msm_cpu_status_probe(struct platform_device *pdev)
{
	u32 cpu;
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -EFAULT;

	msm_pm_slp_sts = devm_kzalloc(&pdev->dev,
			sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
			GFP_KERNEL);

	if (!msm_pm_slp_sts)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct device_node *cpun, *node;
		char *key;

		cpun = of_get_cpu_node(cpu, NULL);

		if (!cpun) {
			__WARN();
			continue;
		}

		node = of_parse_phandle(cpun, "qcom,sleep-status", 0);
		if (!node)
			return -ENODEV;

		msm_pm_slp_sts[cpu].base_addr = of_iomap(node, 0);
		if (!msm_pm_slp_sts[cpu].base_addr) {
			pr_err("%s: Can't find base addr\n", __func__);
			return -ENODEV;
		}

		key = "qcom,sleep-status-mask";
		rc = of_property_read_u32(node, key, &msm_pm_slp_sts[cpu].mask);
		if (rc) {
			pr_err("%s: Can't find %s property\n", __func__, key);
			iounmap(msm_pm_slp_sts[cpu].base_addr);
			return rc;
		}
	}

	return 0;
}

static const struct of_device_id msm_slp_sts_match_tbl[] = {
	{.compatible = "qcom,cpu-sleep-status"},
	{},
};

static struct platform_driver msm_cpu_status_driver = {
	.probe = msm_cpu_status_probe,
	.driver = {
		.name = "cpu_slp_status",
		.owner = THIS_MODULE,
		.of_match_table = msm_slp_sts_match_tbl,
	},
};

static const struct of_device_id msm_snoc_clnt_match_tbl[] = {
	{.compatible = "qcom,pm-snoc-client"},
	{},
};

static struct platform_driver msm_cpu_pm_snoc_client_driver = {
	.probe = msm_pm_snoc_client_probe,
	.driver = {
		.name = "pm_snoc_client",
		.owner = THIS_MODULE,
		.of_match_table = msm_snoc_clnt_match_tbl,
	},
};

struct msm_pc_debug_counters_buffer {
	long *reg;
	u32 len;
	char buf[MAX_BUF_SIZE];
};

static char *counter_name[MSM_PC_NUM_COUNTERS] = {
	"PC Entry Counter",
	"Warmboot Entry Counter",
	"PC Bailout Counter"
};

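/*
 * Render the debug counters for every possible CPU into the buffer.
 * Only the first MSM_PC_NUM_COUNTERS - 1 slots are printed; the last
 * slot (MSM_PC_UNUSED) has no name.
 */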
static int msm_pc_debug_counters_copy(
		struct msm_pc_debug_counters_buffer *data)
{
	int j;
	u32 stat;
	unsigned int cpu;
	unsigned int len;
	uint32_t cluster_id;
	uint32_t cpu_id;
	uint32_t offset;

	for_each_possible_cpu(cpu) {
		len = scnprintf(data->buf + data->len,
				sizeof(data->buf) - data->len,
				"CPU%d\n", cpu);
		cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
		offset = (cluster_id * MAX_CPUS_PER_CLUSTER
				* MSM_PC_NUM_COUNTERS)
				+ (cpu_id * MSM_PC_NUM_COUNTERS);

		data->len += len;

		for (j = 0; j < MSM_PC_NUM_COUNTERS - 1; j++) {
			stat = data->reg[offset + j];
			len = scnprintf(data->buf + data->len,
					sizeof(data->buf) - data->len,
					"\t%s: %d", counter_name[j], stat);

			data->len += len;
		}
		len = scnprintf(data->buf + data->len,
				sizeof(data->buf) - data->len,
				"\n");

		data->len += len;
	}

	return data->len;
}

static ssize_t msm_pc_debug_counters_file_read(struct file *file,
		char __user *bufu, size_t count, loff_t *ppos)
{
	struct msm_pc_debug_counters_buffer *data;
	ssize_t ret;

	mutex_lock(&msm_pc_debug_mutex);
	data = file->private_data;

	if (!data) {
		ret = -EINVAL;
		goto exit;
	}

	if (!bufu) {
		ret = -EINVAL;
		goto exit;
	}

	if (!access_ok(VERIFY_WRITE, bufu, count)) {
		ret = -EFAULT;
		goto exit;
	}

	if (*ppos >= data->len && data->len == 0)
		data->len = msm_pc_debug_counters_copy(data);

	ret = simple_read_from_buffer(bufu, count, ppos,
			data->buf, data->len);
exit:
	mutex_unlock(&msm_pc_debug_mutex);
	return ret;
}

static int msm_pc_debug_counters_file_open(struct inode *inode,
		struct file *file)
{
	struct msm_pc_debug_counters_buffer *buf;
	int ret = 0;

	mutex_lock(&msm_pc_debug_mutex);

	if (!inode->i_private) {
		ret = -EINVAL;
		goto exit;
	}

	file->private_data = kzalloc(
		sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);

	if (!file->private_data) {
		pr_err("%s: ERROR kzalloc failed to allocate %zu bytes\n",
			__func__, sizeof(struct msm_pc_debug_counters_buffer));

		ret = -ENOMEM;
		goto exit;
	}

	buf = file->private_data;
	buf->reg = (long *)inode->i_private;

exit:
	mutex_unlock(&msm_pc_debug_mutex);
	return ret;
}

static int msm_pc_debug_counters_file_close(struct inode *inode,
		struct file *file)
{
	mutex_lock(&msm_pc_debug_mutex);
	kfree(file->private_data);
	mutex_unlock(&msm_pc_debug_mutex);
	return 0;
}

static const struct file_operations msm_pc_debug_counters_fops = {
	.open = msm_pc_debug_counters_file_open,
	.read = msm_pc_debug_counters_file_read,
	.release = msm_pc_debug_counters_file_close,
	.llseek = no_llseek,
};

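/*
 * Look up the per-CPU and L2 clocks from DT. When the SAW turns off
 * the PLL itself (qcom,saw-turns-off-pll) no clock handles are
 * needed; with qcom,synced-clocks only cpu0's clock must be present.
 */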
static int msm_pm_clk_init(struct platform_device *pdev)
{
	bool synced_clocks;
	u32 cpu;
	char clk_name[] = "cpu??_clk";
	char *key;

	key = "qcom,saw-turns-off-pll";
	if (of_property_read_bool(pdev->dev.of_node, key))
		return 0;

	key = "qcom,synced-clocks";
	synced_clocks = of_property_read_bool(pdev->dev.of_node, key);

	for_each_possible_cpu(cpu) {
		struct clk *clk;

		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
		clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(clk)) {
			if (cpu && synced_clocks)
				return 0;
			clk = NULL;
		}
		per_cpu(cpu_clks, cpu) = clk;
	}

	if (synced_clocks)
		return 0;

	l2_clk = clk_get(&pdev->dev, "l2_clk");
	if (IS_ERR(l2_clk))
		pr_warn("%s: Could not get l2_clk (-%ld)\n", __func__,
			PTR_ERR(l2_clk));

	return 0;
}

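/*
 * Main PM probe: allocate the DMA-coherent debug counter buffer,
 * expose it through the pc_debug_counter debugfs file, publish its
 * physical address to IMEM for the warm boot code, read the cache
 * flush and retention DT flags, and initialise the CPU/L2 clocks.
 */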
static int msm_cpu_pm_probe(struct platform_device *pdev)
{
	struct dentry *dent = NULL;
	struct resource *res = NULL;
	int ret = 0;
	void __iomem *msm_pc_debug_counters_imem;
	char *key;
	int alloc_size = (MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER
			* MSM_PC_NUM_COUNTERS
			* sizeof(*msm_pc_debug_counters));

	msm_pc_debug_counters = dma_alloc_coherent(&pdev->dev, alloc_size,
				&msm_pc_debug_counters_phys, GFP_KERNEL);

	if (msm_pc_debug_counters) {
		memset(msm_pc_debug_counters, 0, alloc_size);
		dent = debugfs_create_file("pc_debug_counter", 0444, NULL,
				msm_pc_debug_counters,
				&msm_pc_debug_counters_fops);
		if (!dent)
			pr_err("%s: ERROR debugfs_create_file failed\n",
					__func__);
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			goto skip_save_imem;
		msm_pc_debug_counters_imem = devm_ioremap(&pdev->dev,
				res->start, resource_size(res));
		if (msm_pc_debug_counters_imem) {
			writel_relaxed(msm_pc_debug_counters_phys,
					msm_pc_debug_counters_imem);
			/* memory barrier */
			mb();
			devm_iounmap(&pdev->dev,
					msm_pc_debug_counters_imem);
		}
	} else {
		msm_pc_debug_counters = NULL;
		msm_pc_debug_counters_phys = 0;
	}
skip_save_imem:
	if (pdev->dev.of_node) {
		key = "qcom,tz-flushes-cache";
		msm_pm_tz_flushes_cache =
				of_property_read_bool(pdev->dev.of_node, key);

		key = "qcom,no-pll-switch-for-retention";
		msm_pm_ret_no_pll_switch =
				of_property_read_bool(pdev->dev.of_node, key);

		ret = msm_pm_clk_init(pdev);
		if (ret) {
			pr_err("msm_pm_clk_init returned error %d\n", ret);
			return ret;
		}
	}

	if (pdev->dev.of_node)
		of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

	return ret;
}

static const struct of_device_id msm_cpu_pm_table[] = {
	{.compatible = "qcom,pm"},
	{},
};

static struct platform_driver msm_cpu_pm_driver = {
	.probe = msm_cpu_pm_probe,
	.driver = {
		.name = "msm-pm",
		.owner = THIS_MODULE,
		.of_match_table = msm_cpu_pm_table,
	},
};

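/*
 * Init ordering: the sleep-status driver registers at arch_initcall,
 * the main msm-pm driver at fs_initcall, and the SNOC client last at
 * late_initcall, once the bus framework is available.
 */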
static int __init msm_pm_drv_init(void)
{
	int rc;

	cpumask_clear(&retention_cpus);

	rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);

	if (rc)
		pr_err("%s(): failed to register driver %s\n", __func__,
				msm_cpu_pm_snoc_client_driver.driver.name);
	return rc;
}
late_initcall(msm_pm_drv_init);

static int __init msm_pm_debug_counters_init(void)
{
	int rc;

	rc = platform_driver_register(&msm_cpu_pm_driver);

	if (rc)
		pr_err("%s(): failed to register driver %s\n", __func__,
				msm_cpu_pm_driver.driver.name);
	return rc;
}
fs_initcall(msm_pm_debug_counters_init);

int __init msm_pm_sleep_status_init(void)
{
	static bool registered;

	if (registered)
		return 0;
	registered = true;

	return platform_driver_register(&msm_cpu_status_driver);
}
arch_initcall(msm_pm_sleep_status_init);

#ifdef CONFIG_ARM
static int idle_initialize(void)
{
	arm_pm_idle = arch_idle;
	return 0;
}
early_initcall(idle_initialize);
#endif