/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/pm_qos.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <linux/remote_spinlock.h>
#include <linux/msm_remote_spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/coresight-cti.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/cpu_pm.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm-legacy.h>
#include <soc/qcom/rpm-notifier.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/lpm_levels.h>
#include <soc/qcom/jtag.h>
#include <asm/cputype.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
#include <asm/suspend.h>
#include "lpm-levels-legacy.h"
#include "lpm-workarounds.h"
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>
#if defined(CONFIG_COMMON_CLK)
#include "../clk/clk.h"
#elif defined(CONFIG_COMMON_CLK_MSM)
#include "../../drivers/clk/msm/clock.h"
#endif /* CONFIG_COMMON_CLK */
#include <soc/qcom/minidump.h>

#define SCLK_HZ (32768)
#define SCM_HANDOFF_LOCK_ID "S:7"
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
static remote_spinlock_t scm_handoff_lock;

enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

enum debug_event {
	CPU_ENTER,
	CPU_EXIT,
	CLUSTER_ENTER,
	CLUSTER_EXIT,
	PRE_PC_CB,
	CPU_HP_STARTING,
	CPU_HP_DYING,
};

struct lpm_debug {
	cycle_t time;
	enum debug_event evt;
	int cpu;
	uint32_t arg1;
	uint32_t arg2;
	uint32_t arg3;
	uint32_t arg4;
};

static struct system_pm_ops *sys_pm_ops;
struct lpm_cluster *lpm_root_node;

static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;

static const int num_dbg_elements = 0x100;

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time, bool success);
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);

static bool menu_select;
module_param_named(
	menu_select, menu_select, bool, 0664
);

static bool print_parsed_dt;
module_param_named(
	print_parsed_dt, print_parsed_dt, bool, 0664
);

static bool sleep_disabled;
module_param_named(sleep_disabled,
	sleep_disabled, bool, 0664);

s32 msm_cpuidle_get_deep_idle_latency(void)
{
	return 10;
}
EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);

uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
{
	if (sys_pm_ops)
		return -EUSERS;

	sys_pm_ops = pm_ops;

	return 0;
}

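/*
 * Find the lowest exit latency (in us) among the levels that match the
 * requested reset_level. If the cluster is not linked into a parent's child
 * list, scan its own levels; otherwise scan the siblings in the parent's
 * child list, optionally filtered by level_name.
 */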
static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cluster_level *level;
	struct lpm_cluster *n;
	struct power_params *pwr_params;
	uint32_t latency = 0;
	int i;

	if (!cluster->list.next) {
		for (i = 0; i < cluster->nlevels; i++) {
			level = &cluster->levels[i];
			pwr_params = &level->pwr;
			if (lat_level->reset_level == level->reset_level) {
				if ((latency > pwr_params->latency_us)
						|| (!latency))
					latency = pwr_params->latency_us;
				break;
			}
		}
	} else {
		list_for_each(list, &cluster->parent->child) {
			n = list_entry(list, typeof(*n), list);
			if (lat_level->level_name) {
				if (strcmp(lat_level->level_name,
						n->cluster_name))
					continue;
			}
			for (i = 0; i < n->nlevels; i++) {
				level = &n->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level ==
						level->reset_level) {
					if ((latency > pwr_params->latency_us)
							|| (!latency))
						latency =
						pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return latency;
}

static uint32_t least_cpu_latency(struct list_head *child,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cpu_level *level;
	struct power_params *pwr_params;
	struct lpm_cpu *cpu;
	struct lpm_cluster *n;
	uint32_t latency = 0;
	int i;

	list_for_each(list, child) {
		n = list_entry(list, typeof(*n), list);
		if (lat_level->level_name) {
			if (strcmp(lat_level->level_name, n->cluster_name))
				continue;
		}
		cpu = n->cpu;
		for (i = 0; i < cpu->nlevels; i++) {
			level = &cpu->levels[i];
			pwr_params = &level->pwr;
			if (lat_level->reset_level == level->reset_level) {
				if ((latency > pwr_params->latency_us)
						|| (!latency))
					latency = pwr_params->latency_us;
				break;
			}
		}
	}
	return latency;
}

static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
		int affinity_level)
{
	struct lpm_cluster *n;

	if ((cluster->aff_level == affinity_level)
		|| ((cluster->cpu) && (affinity_level == 0)))
		return cluster;
	else if (!cluster->cpu) {
		n = list_entry(cluster->child.next, typeof(*n), list);
		return cluster_aff_match(n, affinity_level);
	} else
		return NULL;
}

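/**
 * lpm_get_latency() - Look up the minimum exit latency for a low power mode
 * @level: requested affinity level, reset level and optional level name
 * @latency: out parameter, filled with the latency in microseconds
 *
 * Returns 0 on success, -EAGAIN if the driver has not probed yet and
 * -EINVAL for bad arguments or when no matching mode exists.
 */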
int lpm_get_latency(struct latency_level *level, uint32_t *latency)
{
	struct lpm_cluster *cluster;
	uint32_t val;

	if (!lpm_root_node) {
		pr_err("%s: lpm_probe not completed\n", __func__);
		return -EAGAIN;
	}

	if ((level->affinity_level < 0)
		|| (level->affinity_level > lpm_root_node->aff_level)
		|| (level->reset_level < LPM_RESET_LVL_RET)
		|| (level->reset_level > LPM_RESET_LVL_PC)
		|| !latency)
		return -EINVAL;

	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
	if (!cluster) {
		pr_err("%s: No matching cluster found for affinity_level:%d\n",
			__func__, level->affinity_level);
		return -EINVAL;
	}

	if (level->affinity_level == 0)
		val = least_cpu_latency(&cluster->parent->child, level);
	else
		val = least_cluster_latency(cluster, level);

	if (!val) {
		pr_err("%s: No mode with affinity_level:%d reset_level:%d\n",
			__func__, level->affinity_level, level->reset_level);
		return -EINVAL;
	}

	*latency = val;

	return 0;
}
EXPORT_SYMBOL(lpm_get_latency);

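/*
 * Record a power-collapse debug event in the lpm_debug ring buffer
 * (num_dbg_elements entries) along with a timestamp and the calling CPU.
 * The same buffer is registered with minidump from lpm_probe().
 */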
static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
		uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
	struct lpm_debug *dbg;
	int idx;
	static DEFINE_SPINLOCK(debug_lock);
	static int pc_event_index;

	if (!lpm_debug)
		return;

	spin_lock(&debug_lock);
	idx = pc_event_index++;
	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];

	dbg->evt = event;
	dbg->time = arch_counter_get_cntpct();
	dbg->cpu = raw_smp_processor_id();
	dbg->arg1 = arg1;
	dbg->arg2 = arg2;
	dbg->arg3 = arg3;
	dbg->arg4 = arg4;
	spin_unlock(&debug_lock);
}

static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
{
	return HRTIMER_NORESTART;
}

static void msm_pm_set_timer(uint32_t modified_time_us)
{
	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);

	lpm_hrtimer.function = lpm_hrtimer_cb;
	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

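/*
 * Program the L2 SPM for the requested low power mode and remember the
 * resulting flag, which is later handed off to TZ via lpm_cpu_pre_pc_cb().
 * CoreSight CTI context is saved/restored around modes that power down L2.
 */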
int set_l2_mode(struct low_power_ops *ops, int mode,
		struct lpm_cluster_level *level)
{
	int lpm = mode;
	int rc = 0;
	bool notify_rpm = level->notify_rpm;
	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
			smp_processor_id())->lpm_dev;

	if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
			cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
		coresight_cti_ctx_restore();

	switch (mode) {
	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
	case MSM_SPM_MODE_POWER_COLLAPSE:
	case MSM_SPM_MODE_FASTPC:
		if (level->no_cache_flush)
			cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
		else
			cpu_ops->tz_flag = MSM_SCM_L2_OFF;
		coresight_cti_ctx_save();
		break;
	case MSM_SPM_MODE_GDHS:
		cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
		coresight_cti_ctx_save();
		break;
	case MSM_SPM_MODE_CLOCK_GATING:
	case MSM_SPM_MODE_RETENTION:
	case MSM_SPM_MODE_DISABLED:
		cpu_ops->tz_flag = MSM_SCM_L2_ON;
		break;
	default:
		cpu_ops->tz_flag = MSM_SCM_L2_ON;
		lpm = MSM_SPM_MODE_DISABLED;
		break;
	}

	if (lpm_wa_get_skip_l2_spm())
		rc = msm_spm_config_low_power_mode_addr(ops->spm, lpm,
				notify_rpm);
	else
		rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);

	if (rc)
		pr_err("%s: Failed to set L2 low power mode %d, ERR %d\n",
				__func__, lpm, rc);

	return rc;
}

int set_l3_mode(struct low_power_ops *ops, int mode,
		struct lpm_cluster_level *level)
{
	bool notify_rpm = level->notify_rpm;
	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
			smp_processor_id())->lpm_dev;

	switch (mode) {
	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
	case MSM_SPM_MODE_POWER_COLLAPSE:
	case MSM_SPM_MODE_FASTPC:
		cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
		break;
	default:
		break;
	}
	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
}


int set_system_mode(struct low_power_ops *ops, int mode,
		struct lpm_cluster_level *level)
{
	bool notify_rpm = level->notify_rpm;

	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
}

static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
		struct lpm_cluster_level *level)
{
	struct low_power_ops *ops;

	if (use_psci)
		return 0;

	ops = &cluster->lpm_dev[ndevice];
	if (ops && ops->set_mode)
		return ops->set_mode(ops, level->mode[ndevice],
				level);
	else
		return -EINVAL;
}

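/*
 * Pick the deepest allowed CPU idle level whose exit latency fits within
 * the PM QoS CPU_DMA_LATENCY request and whose residency fits the expected
 * sleep time. If the next event timer would cut the sleep short, program
 * lpm_hrtimer so the CPU wakes early enough to cover the exit latency.
 */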
static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i;
	uint32_t lvl_latency_us = 0;
	uint32_t *residency = get_per_cpu_max_residency(dev->cpu);

	if (!cpu)
		return best_level;

	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
		return 0;

	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

	for (i = 0; i < cpu->nlevels; i++) {
		struct lpm_cpu_level *level = &cpu->levels[i];
		struct power_params *pwr_params = &level->pwr;
		uint32_t next_wakeup_us = (uint32_t)sleep_us;
		enum msm_pm_sleep_mode mode = level->mode;
		bool allow;

		allow = lpm_cpu_mode_allow(dev->cpu, i, true);

		if (!allow)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		if (latency_us < lvl_latency_us)
			break;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
				break;

			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		best_level = i;

		if (next_event_us && next_event_us < sleep_us &&
				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
			modified_time_us
				= next_event_us - lvl_latency_us;
		else
			modified_time_us = 0;

		if (next_wakeup_us <= residency[i])
			break;
	}

	if (modified_time_us)
		msm_pm_set_timer(modified_time_us);

	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);

	return best_level;
}

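/*
 * Return the time (in us) until the earliest next timer event among the
 * online CPUs of this cluster, and optionally report which CPU owns that
 * event through @mask. For non-idle (hotplug/suspend) callers the sleep
 * time is treated as unbounded.
 */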
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
		struct cpumask *mask, bool from_idle)
{
	int cpu;
	int next_cpu = raw_smp_processor_id();
	ktime_t next_event;
	struct cpumask online_cpus_in_cluster;

	next_event.tv64 = KTIME_MAX;
	if (!from_idle) {
		if (mask)
			cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
		return ~0ULL;
	}

	cpumask_and(&online_cpus_in_cluster,
			&cluster->num_children_in_sync, cpu_online_mask);

	for_each_cpu(cpu, &online_cpus_in_cluster) {
		ktime_t *next_event_c;

		next_event_c = get_next_event_cpu(cpu);
		if (next_event_c->tv64 < next_event.tv64) {
			next_event.tv64 = next_event_c->tv64;
			next_cpu = cpu;
		}
	}

	if (mask)
		cpumask_copy(mask, cpumask_of(next_cpu));

	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
		return ktime_to_us(ktime_sub(next_event, ktime_get()));
	else
		return 0;
}

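/*
 * Choose the deepest cluster low power level for which every child CPU has
 * voted and which satisfies the aggregated PM QoS latency, the expected
 * sleep time and the RPM/system-sleep constraints. Returns the level index
 * or a negative value if no level qualifies.
 */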
static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
{
	int best_level = -1;
	int i;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;

	if (!cluster)
		return -EINVAL;

	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);

	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
							&mask);

	/*
	 * If at least one of the cores in the cluster is online, the cluster
	 * low power modes should be determined by the idle characteristics
	 * even if the last core enters the low power mode as a part of
	 * hotplug.
	 */

	if (!from_idle && num_online_cpus() > 1 &&
		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
		from_idle = true;

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *level = &cluster->levels[i];
		struct power_params *pwr_params = &level->pwr;

		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
			continue;

		if (level->last_core_only &&
			cpumask_weight(cpu_online_mask) > 1)
			continue;

		if (!cpumask_equal(&cluster->num_children_in_sync,
				&level->num_cpu_votes))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			break;

		if (sleep_us < pwr_params->time_overhead_us)
			break;

		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		if (level->notify_rpm) {
			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
				continue;
			if (!sys_pm_ops->sleep_allowed())
				continue;
		}

		best_level = i;

		if (from_idle && sleep_us <= pwr_params->max_residency)
			break;
	}

	return best_level;
}

static void cluster_notify(struct lpm_cluster *cluster,
		struct lpm_cluster_level *level, bool enter)
{
	if (level->is_reset && enter)
		cpu_cluster_pm_enter(cluster->aff_level);
	else if (level->is_reset && !enter)
		cpu_cluster_pm_exit(cluster->aff_level);
}

static unsigned int get_next_online_cpu(bool from_idle)
{
	unsigned int cpu;
	ktime_t next_event;
	unsigned int next_cpu = raw_smp_processor_id();

	if (!from_idle)
		return next_cpu;
	next_event.tv64 = KTIME_MAX;
	for_each_online_cpu(cpu) {
		ktime_t *next_event_c;

		next_event_c = get_next_event_cpu(cpu);
		if (next_event_c->tv64 < next_event.tv64) {
			next_event.tv64 = next_event_c->tv64;
			next_cpu = cpu;
		}
	}
	return next_cpu;
}

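/*
 * Apply the chosen cluster level: program every low power device for the
 * level and, for RPM-notified levels, hand the next wakeup CPU to the
 * system PM ops. On failure all devices are reset to the default level.
 */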
static int cluster_configure(struct lpm_cluster *cluster, int idx,
		bool from_idle)
{
	struct lpm_cluster_level *level = &cluster->levels[idx];
	struct cpumask cpumask;
	unsigned int cpu;
	int ret, i;

	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
			|| is_IPI_pending(&cluster->num_children_in_sync)) {
		return -EPERM;
	}

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		lpm_stats_cluster_enter(cluster->stats, idx);
	}

	for (i = 0; i < cluster->ndevices; i++) {
		ret = set_device_mode(cluster, i, level);
		if (ret)
			goto failed_set_mode;
	}

	if (level->notify_rpm) {
		struct cpumask *nextcpu;

		cpu = get_next_online_cpu(from_idle);
		cpumask_copy(&cpumask, cpumask_of(cpu));
		nextcpu = level->disable_dynamic_routing ? NULL : &cpumask;

		if (sys_pm_ops && sys_pm_ops->enter) {
			ret = sys_pm_ops->enter(nextcpu);
			if (ret)
				goto failed_set_mode;
		}

		if (cluster->no_saw_devices && !use_psci)
			msm_spm_set_rpm_hs(true);
	}

	/* Notify the cluster enter event after successful configuration */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;
	return 0;

failed_set_mode:

	for (i = 0; i < cluster->ndevices; i++) {
		int rc = 0;

		level = &cluster->levels[cluster->default_level];
		rc = set_device_mode(cluster, i, level);
		WARN_ON(rc);
	}
	return ret;
}

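/*
 * Called on the way into a low power state. Add @cpu to the cluster's
 * sync mask and, once the last CPU arrives, select and configure a
 * cluster level, then propagate the vote to the parent cluster.
 */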
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t start_time)
{
	int i;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	cpumask_or(&cluster->num_children_in_sync, cpu,
			&cluster->num_children_in_sync);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_or(&lvl->num_cpu_votes, cpu,
					&lvl->num_cpu_votes);
	}

	/*
	 * cluster_select() does not make any configuration changes, so it's
	 * OK to release the lock here. If a core wakes up for a rude request,
	 * it need not wait for another to finish its cluster selection and
	 * configuration process.
	 */

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto failed;

	i = cluster_select(cluster, from_idle);

	if (i < 0)
		goto failed;

	if (cluster_configure(cluster, i, from_idle))
		goto failed;

	cluster->stats->sleep_time = start_time;
	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
			from_idle, start_time);

	spin_unlock(&cluster->sync_lock);

	if (!use_psci) {
		struct lpm_cluster_level *level = &cluster->levels[i];

		if (level->notify_rpm)
			if (sys_pm_ops && sys_pm_ops->update_wakeup)
				sys_pm_ops->update_wakeup(from_idle);
	}

	return;
failed:
	spin_unlock(&cluster->sync_lock);
	cluster->stats->sleep_time = 0;
}

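/*
 * Undo cluster_prepare() on wakeup: clear the CPU's votes and, if this is
 * the first CPU out of the last configured level, notify the system PM
 * exit path, restore the default level and recurse up to the parent.
 */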
static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t end_time, bool success)
{
	struct lpm_cluster_level *level;
	bool first_cpu;
	int last_level, i, ret;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	last_level = cluster->default_level;
	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus);
	cpumask_andnot(&cluster->num_children_in_sync,
			&cluster->num_children_in_sync, cpu);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_andnot(&lvl->num_cpu_votes,
					&lvl->num_cpu_votes, cpu);
	}

	if (!first_cpu || cluster->last_level == cluster->default_level)
		goto unlock_return;

	if (cluster->stats->sleep_time)
		cluster->stats->sleep_time = end_time -
			cluster->stats->sleep_time;
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, success);

	level = &cluster->levels[cluster->last_level];
	if (level->notify_rpm) {
		if (sys_pm_ops && sys_pm_ops->exit)
			sys_pm_ops->exit(success);

		/* If RPM bumps up CX to turbo, unvote the CX turbo vote
		 * during exit of RPM-assisted power collapse to
		 * reduce the power impact.
		 */
		lpm_wa_cx_unvote_send();

		if (cluster->no_saw_devices && !use_psci)
			msm_spm_set_rpm_hs(false);

	}

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);

	last_level = cluster->last_level;
	cluster->last_level = cluster->default_level;

	for (i = 0; i < cluster->ndevices; i++) {
		level = &cluster->levels[cluster->default_level];
		ret = set_device_mode(cluster, i, level);

		WARN_ON(ret);
	}

	cluster_notify(cluster, &cluster->levels[last_level], false);
	cluster_unprepare(cluster->parent, &cluster->child_cpus,
			last_level, from_idle, end_time, success);
unlock_return:
	spin_unlock(&cluster->sync_lock);
}

static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
	bool jtag_save_restore =
			cluster->cpu->levels[cpu_index].jtag_save_restore;

	/* Use the broadcast timer for aggregating sleep mode within a cluster.
	 * A broadcast timer could be used in the following scenarios:
	 * 1) The architected timer HW gets reset during certain low power
	 * modes and the core relies on an external (broadcast) timer to wake
	 * up from sleep. This information is passed through device tree.
	 * 2) The CPU low power mode could trigger a system low power mode.
	 * The low power module relies on the broadcast timer to aggregate the
	 * next wakeup within a cluster, in which case the CPU switches over
	 * to the broadcast timer.
	 */
	if (from_idle && (cpu_level->use_bc_timer ||
			(cpu_index >= cluster->min_child_level)))
		tick_broadcast_enter();

	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
		|| (cpu_level->mode ==
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
		|| (cpu_level->is_reset)))
		cpu_pm_enter();

	/*
	 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
	 */
	if (jtag_save_restore)
		msm_jtag_save_state();
}

static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
	bool jtag_save_restore =
			cluster->cpu->levels[cpu_index].jtag_save_restore;

	if (from_idle && (cpu_level->use_bc_timer ||
			(cpu_index >= cluster->min_child_level)))
		tick_broadcast_exit();

	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
		|| (cpu_level->mode ==
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
		|| cpu_level->is_reset))
		cpu_pm_exit();

	/*
	 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
	 */
	if (jtag_save_restore)
		msm_jtag_restore_state();
}

#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7)
static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
{
	int state_id = 0;

	if (!cluster)
		return 0;

	spin_lock(&cluster->sync_lock);

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	state_id |= get_cluster_id(cluster->parent, aff_lvl);

	if (cluster->last_level != cluster->default_level) {
		struct lpm_cluster_level *level
			= &cluster->levels[cluster->last_level];

		state_id |= (level->psci_id & cluster->psci_mode_mask)
			<< cluster->psci_mode_shift;
		(*aff_lvl)++;
	}
unlock_and_return:
	spin_unlock(&cluster->sync_lock);
	return state_id;
}
#endif

#if !defined(CONFIG_CPU_V7)
asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
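/*
 * Enter the selected level through PSCI. For level 0 a plain WFI is used;
 * deeper levels build a composite PSCI state_id from the CPU and cluster
 * level IDs and call arm_cpuidle_suspend(). Returns true if the low power
 * mode was actually entered.
 */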
static bool psci_enter_sleep(struct lpm_cluster *cluster,
		int idx, bool from_idle)
{
	bool ret;
	/*
	 * idx = 0 is the default LPM state
	 */
	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		ret = true;
	} else {
		int affinity_level = 0;
		int state_id = get_cluster_id(cluster, &affinity_level);
		int power_state =
			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
		bool success = false;

		if (cluster->cpu->levels[idx].hyp_psci) {
			stop_critical_timings();
			__invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
			start_critical_timings();
			return true;
		}

		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
		state_id |= (power_state | affinity_level
			| cluster->cpu->levels[idx].psci_id);

		update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, true);
		stop_critical_timings();
		success = !arm_cpuidle_suspend(state_id);
		start_critical_timings();
		update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, true);
		ret = success;
	}
	return ret;
}
#elif defined(CONFIG_ARM_PSCI)
static bool psci_enter_sleep(struct lpm_cluster *cluster,
		int idx, bool from_idle)
{
	bool ret;

	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		ret = true;
	} else {
		int affinity_level = 0;
		int state_id = get_cluster_id(cluster, &affinity_level);
		int power_state =
			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
		bool success = false;

		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
		state_id |= (power_state | affinity_level
			| cluster->cpu->levels[idx].psci_id);

		update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, true);
		stop_critical_timings();
		success = !arm_cpuidle_suspend(state_id);
		start_critical_timings();
		update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, true);
		ret = success;
	}
	return ret;
}
#else
static bool psci_enter_sleep(struct lpm_cluster *cluster,
		int idx, bool from_idle)
{
	WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
	return false;
}
#endif

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
	int idx;

	if (!cluster)
		return 0;

	idx = cpu_power_select(dev, cluster->cpu);

	return idx;
}

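/*
 * cpuidle enter hook: prepare the CPU and its cluster hierarchy, enter the
 * level via SPM (msm_cpu_pm_enter_sleep) or PSCI, then unwind the cluster
 * state and report the residency back to the cpuidle core.
 */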
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
	bool success = true;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	ktime_t start = ktime_get();
	int64_t start_time = ktime_to_ns(ktime_get()), end_time;

	if (idx < 0)
		return -EINVAL;

	cpu_prepare(cluster, idx, true);
	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));

	trace_cpu_idle_enter(idx);
	lpm_stats_cpu_enter(idx, start_time);

	if (need_resched())
		goto exit;

	if (!use_psci) {
		if (idx > 0)
			update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
					0xdeaffeed, true);
		success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
				true);

		if (idx > 0)
			update_debug_pc_event(CPU_EXIT, idx, success,
					0xdeaffeed, true);
	} else {
		success = psci_enter_sleep(cluster, idx, true);
	}

exit:
	end_time = ktime_to_ns(ktime_get());
	lpm_stats_cpu_exit(idx, end_time, success);

	cluster_unprepare(cluster, cpumask, idx, true, end_time, success);
	cpu_unprepare(cluster, idx, true);

	trace_cpu_idle_exit(idx, success);
	dev->last_residency = ktime_us_delta(ktime_get(), start);
	local_irq_enable();

	return idx;
}

#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	struct cpuidle_device *device;
	int cpu, ret;

	if (!mask || !drv)
		return -EINVAL;

	drv->cpumask = mask;
	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("Failed to register cpuidle driver %d\n", ret);
		goto failed_driver_register;
	}

	for_each_cpu(cpu, mask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

		ret = cpuidle_register_device(device);
		if (ret) {
			pr_err("Failed to register cpuidle device for cpu:%u\n",
					cpu);
			goto failed_driver_register;
		}
	}
	return ret;
failed_driver_register:
	for_each_cpu(cpu, mask)
		cpuidle_unregister_driver(drv);
	return ret;
}
#else
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	return cpuidle_register(drv, NULL);
}
#endif

static struct cpuidle_governor lpm_governor = {
	.name = "qcom",
	.rating = 30,
	.select = lpm_cpuidle_select,
	.owner = THIS_MODULE,
};

static int cluster_cpuidle_register(struct lpm_cluster *cl)
{
	int i = 0, ret = 0;
	unsigned int cpu;
	struct lpm_cluster *p = NULL;

	if (!cl->cpu) {
		struct lpm_cluster *n;

		list_for_each_entry(n, &cl->child, list) {
			ret = cluster_cpuidle_register(n);
			if (ret)
				break;
		}
		return ret;
	}

	cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
	if (!cl->drv)
		return -ENOMEM;

	cl->drv->name = "msm_idle";

	for (i = 0; i < cl->cpu->nlevels; i++) {
		struct cpuidle_state *st = &cl->drv->states[i];
		struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];

		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
		snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
		st->flags = 0;
		st->exit_latency = cpu_level->pwr.latency_us;
		st->power_usage = cpu_level->pwr.ss_power;
		st->target_residency = 0;
		st->enter = lpm_cpuidle_enter;
	}

	cl->drv->state_count = cl->cpu->nlevels;
	cl->drv->safe_state_index = 0;
	for_each_cpu(cpu, &cl->child_cpus)
		per_cpu(cpu_cluster, cpu) = cl;

	for_each_possible_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		p = per_cpu(cpu_cluster, cpu);
		while (p) {
			int j;

			spin_lock(&p->sync_lock);
			cpumask_set_cpu(cpu, &p->num_children_in_sync);
			for (j = 0; j < p->nlevels; j++)
				cpumask_copy(&p->levels[j].num_cpu_votes,
						&p->num_children_in_sync);
			spin_unlock(&p->sync_lock);
			p = p->parent;
		}
	}
	ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);

	if (ret) {
		kfree(cl->drv);
		return -ENOMEM;
	}
	return 0;
}

/**
 * init_lpm - initializes the governor
 */
static int __init init_lpm(void)
{
	return cpuidle_register_governor(&lpm_governor);
}

postcore_initcall(init_lpm);

static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
		struct lpm_cluster *parent)
{
	const char **level_name;
	int i;

	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cpu->nlevels; i++)
		level_name[i] = cpu->levels[i].name;

	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
			parent->stats, &parent->child_cpus);

	kfree(level_name);
}

static void register_cluster_lpm_stats(struct lpm_cluster *cl,
		struct lpm_cluster *parent)
{
	const char **level_name;
	int i;
	struct lpm_cluster *child;

	if (!cl)
		return;

	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cl->nlevels; i++)
		level_name[i] = cl->levels[i].level_name;

	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
			cl->nlevels, parent ? parent->stats : NULL, NULL);

	kfree(level_name);

	if (cl->cpu) {
		register_cpu_lpm_stats(cl->cpu, cl);
		return;
	}

	list_for_each_entry(child, &cl->child, list)
		register_cluster_lpm_stats(child, cl);
}

static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	lpm_stats_suspend_enter();

	return 0;
}

static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	lpm_stats_suspend_exit();
}

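/*
 * System suspend entry point: pick the deepest CPU level allowed for
 * suspend, prepare the cluster hierarchy with an unbounded sleep time and
 * enter the mode via SPM or PSCI on the last running CPU.
 */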
static int lpm_suspend_enter(suspend_state_t state)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
	struct lpm_cpu *lpm_cpu = cluster->cpu;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
	int idx;
	bool success = true;

	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
		if (lpm_cpu_mode_allow(cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return 0;
	}
	cpu_prepare(cluster, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);
	if (idx > 0)
		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
					0xdeaffeed, false);

	/*
	 * Print the clocks that are enabled during system suspend.
	 * This debug information is useful to know which clocks are
	 * enabled and preventing the system level LPMs (XO and Vmin).
	 */
	clock_debug_print_enabled(true);

	if (!use_psci)
		msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
	else
		success = psci_enter_sleep(cluster, idx, true);

	if (idx > 0)
		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
					false);

	cluster_unprepare(cluster, cpumask, idx, false, 0, success);
	cpu_unprepare(cluster, idx, false);
	return 0;
}

static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);

	update_debug_pc_event(CPU_HP_DYING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);

	update_debug_pc_event(CPU_HP_STARTING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS,
			false, 0, true);
	return 0;
}

static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};

static int lpm_probe(struct platform_device *pdev)
{
	int ret;
	int size;
	struct kobject *module_kobj = NULL;
	struct md_region md_entry;

	get_online_cpus();
	lpm_root_node = lpm_of_parse_cluster(pdev);

	if (IS_ERR_OR_NULL(lpm_root_node)) {
		pr_err("%s(): Failed to probe low power modes\n", __func__);
		put_online_cpus();
		return PTR_ERR(lpm_root_node);
	}

	if (print_parsed_dt)
		cluster_dt_walkthrough(lpm_root_node);

	/*
	 * Register the hotplug notifier before the broadcast timer setup to
	 * prevent a race where a broadcast timer might not be set up for a
	 * core. This is a bug in the existing code, but no issues are known,
	 * possibly because of how late lpm_levels gets initialized.
	 */
	suspend_set_ops(&lpm_suspend_ops);
	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
	if (ret) {
		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
			__func__, ret);
		put_online_cpus();
		return ret;
	}
	size = num_dbg_elements * sizeof(struct lpm_debug);
	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
			&lpm_debug_phys, GFP_KERNEL);
	register_cluster_lpm_stats(lpm_root_node, NULL);

	ret = cluster_cpuidle_register(lpm_root_node);
	put_online_cpus();
	if (ret) {
		pr_err("%s(): Failed to register with cpuidle framework\n",
			__func__);
		goto failed;
	}
	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
			"AP_QCOM_SLEEP_STARTING",
			lpm_starting_cpu, lpm_dying_cpu);
	if (ret)
		goto failed;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		ret = -ENOENT;
		goto failed;
	}

	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
	if (ret) {
		pr_err("%s(): Failed to create cluster level nodes\n",
			__func__);
		goto failed;
	}

	/* Add lpm_debug to Minidump */
	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
	md_entry.virt_addr = (uintptr_t)lpm_debug;
	md_entry.phys_addr = lpm_debug_phys;
	md_entry.size = size;
	if (msm_minidump_add_region(&md_entry))
		pr_info("Failed to add lpm_debug in Minidump\n");

	return 0;
failed:
	free_cluster_node(lpm_root_node);
	lpm_root_node = NULL;
	return ret;
}

static const struct of_device_id lpm_mtch_tbl[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};

static struct platform_driver lpm_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.of_match_table = lpm_mtch_tbl,
	},
};

static int __init lpm_levels_module_init(void)
{
	int rc;

	rc = platform_driver_register(&lpm_driver);
	if (rc) {
		pr_info("Error registering %s\n", lpm_driver.driver.name);
		goto fail;
	}

fail:
	return rc;
}
late_initcall(lpm_levels_module_init);

enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;

	/*
	 * There is no need to acquire the lock if probe isn't completed yet.
	 * In the event of hotplug happening before lpm probe, we want to
	 * flush the cache to make sure that L2 is flushed. In particular,
	 * this could cause incoherencies for a cluster architecture. This
	 * wouldn't affect the idle case as the idle driver wouldn't be
	 * registered before the probe function.
	 */
	if (!cluster)
		return MSM_SCM_L2_OFF;

	/*
	 * Assumes L2 only. What/how parameters get passed into TZ will
	 * determine how this function reports this info back in msm-pm.c
	 */
	spin_lock(&cluster->sync_lock);

	if (!cluster->lpm_dev) {
		retflag = MSM_SCM_L2_OFF;
		goto unlock_and_return;
	}

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	if (cluster->lpm_dev)
		retflag = cluster->lpm_dev->tz_flag;
	/*
	 * The scm_handoff_lock will be released by the secure monitor.
	 * It is used to serialize power-collapses from this point on,
	 * so that both Linux and the secure context have a consistent
	 * view regarding the number of running cpus (cpu_count).
	 *
	 * It must be acquired before releasing the cluster lock.
	 */
unlock_and_return:
	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
			0xdeadbeef);
	trace_pre_pc_cb(retflag);
	remote_spin_lock_rlock_id(&scm_handoff_lock,
				REMOTE_SPINLOCK_TID_START + cpu);
	spin_unlock(&cluster->sync_lock);
	return retflag;
}

/**
 * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
 *
 * @cpu: cpuid of the dying CPU
 *
 * Called from platform_cpu_kill() to terminate hotplug in a low power mode
 */
void lpm_cpu_hotplug_enter(unsigned int cpu)
{
	enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
	int i;
	int idx = -1;

	/*
	 * If lpm isn't probed yet, try to put the cpu into one of the
	 * available modes.
	 */
	if (!cluster) {
		if (msm_spm_is_mode_avail(
				MSM_SPM_MODE_POWER_COLLAPSE)) {
			mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
		} else if (msm_spm_is_mode_avail(
				MSM_SPM_MODE_FASTPC)) {
			mode = MSM_PM_SLEEP_MODE_FASTPC;
		} else if (msm_spm_is_mode_avail(
				MSM_SPM_MODE_RETENTION)) {
			mode = MSM_PM_SLEEP_MODE_RETENTION;
		} else {
			pr_err("No mode avail for cpu%d hotplug\n", cpu);
			WARN_ON(1);
			return;
		}
	} else {
		struct lpm_cpu *lpm_cpu;
		uint32_t ss_pwr = ~0U;

		lpm_cpu = cluster->cpu;
		for (i = 0; i < lpm_cpu->nlevels; i++) {
			if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power)
				continue;
			ss_pwr = lpm_cpu->levels[i].pwr.ss_power;
			idx = i;
			mode = lpm_cpu->levels[i].mode;
		}

		if (mode == MSM_PM_SLEEP_MODE_NR)
			return;

		WARN_ON(idx < 0);
		cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0);
	}

	msm_cpu_pm_enter_sleep(mode, false);
}