 1/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/platform_device.h>
19#include <linux/mutex.h>
20#include <linux/cpu.h>
21#include <linux/of.h>
22#include <linux/hrtimer.h>
23#include <linux/ktime.h>
24#include <linux/tick.h>
25#include <linux/suspend.h>
26#include <linux/pm_qos.h>
27#include <linux/of_platform.h>
28#include <linux/smp.h>
29#include <linux/remote_spinlock.h>
30#include <linux/msm_remote_spinlock.h>
31#include <linux/dma-mapping.h>
32#include <linux/coresight-cti.h>
33#include <linux/moduleparam.h>
34#include <linux/sched.h>
35#include <linux/cpu_pm.h>
36#include <soc/qcom/spm.h>
37#include <soc/qcom/pm-legacy.h>
38#include <soc/qcom/rpm-notifier.h>
39#include <soc/qcom/event_timer.h>
40#include <soc/qcom/lpm-stats.h>
41#include <soc/qcom/lpm_levels.h>
42#include <soc/qcom/jtag.h>
43#include <asm/cputype.h>
44#include <asm/arch_timer.h>
45#include <asm/cacheflush.h>
46#include <asm/suspend.h>
47#include "lpm-levels-legacy.h"
48#include "lpm-workarounds.h"
49#include <trace/events/power.h>
50#define CREATE_TRACE_POINTS
51#include <trace/events/trace_msm_low_power.h>
52#if defined(CONFIG_COMMON_CLK)
53#include "../clk/clk.h"
54#elif defined(CONFIG_COMMON_CLK_MSM)
55#include "../../drivers/clk/msm/clock.h"
56#endif /* CONFIG_COMMON_CLK */
57#include <soc/qcom/minidump.h>
58
59#define SCLK_HZ (32768)
60#define SCM_HANDOFF_LOCK_ID "S:7"
61#define PSCI_POWER_STATE(reset) (reset << 30)
62#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
63static remote_spinlock_t scm_handoff_lock;
64
65enum {
66 MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
67 MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
68};
69
70enum debug_event {
71 CPU_ENTER,
72 CPU_EXIT,
73 CLUSTER_ENTER,
74 CLUSTER_EXIT,
75 PRE_PC_CB,
76 CPU_HP_STARTING,
77 CPU_HP_DYING,
78};
79
80struct lpm_debug {
81 cycle_t time;
82 enum debug_event evt;
83 int cpu;
84 uint32_t arg1;
85 uint32_t arg2;
86 uint32_t arg3;
87 uint32_t arg4;
88};
89
90static struct system_pm_ops *sys_pm_ops;
91struct lpm_cluster *lpm_root_node;
92
93static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
94static bool suspend_in_progress;
95static struct hrtimer lpm_hrtimer;
96static struct lpm_debug *lpm_debug;
97static phys_addr_t lpm_debug_phys;
98
99static const int num_dbg_elements = 0x100;
100
101static void cluster_unprepare(struct lpm_cluster *cluster,
102 const struct cpumask *cpu, int child_idx, bool from_idle,
103 int64_t time);
104static void cluster_prepare(struct lpm_cluster *cluster,
105 const struct cpumask *cpu, int child_idx, bool from_idle,
106 int64_t time);
107
108static bool menu_select;
109module_param_named(
110 menu_select, menu_select, bool, 0664
111);
112
113static bool print_parsed_dt;
114module_param_named(
115 print_parsed_dt, print_parsed_dt, bool, 0664
116);
117
118static bool sleep_disabled;
119module_param_named(sleep_disabled,
120 sleep_disabled, bool, 0664);
121
122s32 msm_cpuidle_get_deep_idle_latency(void)
123{
124 return 10;
125}
126EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
127
128uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
129{
130 if (sys_pm_ops)
131 return -EUSERS;
132
133 sys_pm_ops = pm_ops;
134
135 return 0;
136}
137
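/*
 * least_cluster_latency() - return the smallest cluster-level latency that
 * matches the requested reset_level. For the root cluster its own levels
 * are scanned directly; otherwise the sibling clusters under the parent
 * are walked, optionally filtered by lat_level->level_name.
 */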
138static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
139 struct latency_level *lat_level)
140{
141 struct list_head *list;
142 struct lpm_cluster_level *level;
143 struct lpm_cluster *n;
144 struct power_params *pwr_params;
145 uint32_t latency = 0;
146 int i;
147
148 if (!cluster->list.next) {
149 for (i = 0; i < cluster->nlevels; i++) {
150 level = &cluster->levels[i];
151 pwr_params = &level->pwr;
152 if (lat_level->reset_level == level->reset_level) {
153 if ((latency > pwr_params->latency_us)
154 || (!latency))
155 latency = pwr_params->latency_us;
156 break;
157 }
158 }
159 } else {
160 list_for_each(list, &cluster->parent->child) {
161 n = list_entry(list, typeof(*n), list);
162 if (lat_level->level_name) {
163 if (strcmp(lat_level->level_name,
164 n->cluster_name))
165 continue;
166 }
167 for (i = 0; i < n->nlevels; i++) {
168 level = &n->levels[i];
169 pwr_params = &level->pwr;
170 if (lat_level->reset_level ==
171 level->reset_level) {
172 if ((latency > pwr_params->latency_us)
173 || (!latency))
174 latency =
175 pwr_params->latency_us;
176 break;
177 }
178 }
179 }
180 }
181 return latency;
182}
183
184static uint32_t least_cpu_latency(struct list_head *child,
185 struct latency_level *lat_level)
186{
187 struct list_head *list;
188 struct lpm_cpu_level *level;
189 struct power_params *pwr_params;
190 struct lpm_cpu *cpu;
191 struct lpm_cluster *n;
192 uint32_t latency = 0;
193 int i;
194
195 list_for_each(list, child) {
196 n = list_entry(list, typeof(*n), list);
197 if (lat_level->level_name) {
198 if (strcmp(lat_level->level_name, n->cluster_name))
199 continue;
200 }
201 cpu = n->cpu;
202 for (i = 0; i < cpu->nlevels; i++) {
203 level = &cpu->levels[i];
204 pwr_params = &level->pwr;
205 if (lat_level->reset_level == level->reset_level) {
206 if ((latency > pwr_params->latency_us)
207 || (!latency))
208 latency = pwr_params->latency_us;
209 break;
210 }
211 }
212 }
213 return latency;
214}
215
216static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
217 int affinity_level)
218{
219 struct lpm_cluster *n;
220
221 if ((cluster->aff_level == affinity_level)
222 || ((cluster->cpu) && (affinity_level == 0)))
223 return cluster;
224 else if (!cluster->cpu) {
225 n = list_entry(cluster->child.next, typeof(*n), list);
226 return cluster_aff_match(n, affinity_level);
227 } else
228 return NULL;
229}
230
231int lpm_get_latency(struct latency_level *level, uint32_t *latency)
232{
233 struct lpm_cluster *cluster;
234 uint32_t val;
235
236 if (!lpm_root_node) {
237 pr_err("%s: lpm_probe not completed\n", __func__);
238 return -EAGAIN;
239 }
240
241 if ((level->affinity_level < 0)
242 || (level->affinity_level > lpm_root_node->aff_level)
243 || (level->reset_level < LPM_RESET_LVL_RET)
244 || (level->reset_level > LPM_RESET_LVL_PC)
245 || !latency)
246 return -EINVAL;
247
248 cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
249 if (!cluster) {
250 pr_err("%s:No matching cluster found for affinity_level:%d\n",
251 __func__, level->affinity_level);
252 return -EINVAL;
253 }
254
255 if (level->affinity_level == 0)
256 val = least_cpu_latency(&cluster->parent->child, level);
257 else
258 val = least_cluster_latency(cluster, level);
259
260 if (!val) {
261 pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
262 __func__, level->affinity_level, level->reset_level);
263 return -EINVAL;
264 }
265
266 *latency = val;
267
268 return 0;
269}
270EXPORT_SYMBOL(lpm_get_latency);
271
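/*
 * update_debug_pc_event() - append one entry to the lpm_debug ring buffer
 * (num_dbg_elements entries, also exposed through minidump), recording the
 * event type, timestamp, CPU and four caller-supplied arguments.
 */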
272static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
273 uint32_t arg2, uint32_t arg3, uint32_t arg4)
274{
275 struct lpm_debug *dbg;
276 int idx;
277 static DEFINE_SPINLOCK(debug_lock);
278 static int pc_event_index;
279
280 if (!lpm_debug)
281 return;
282
283 spin_lock(&debug_lock);
284 idx = pc_event_index++;
285 dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
286
287 dbg->evt = event;
288 dbg->time = arch_counter_get_cntpct();
289 dbg->cpu = raw_smp_processor_id();
290 dbg->arg1 = arg1;
291 dbg->arg2 = arg2;
292 dbg->arg3 = arg3;
293 dbg->arg4 = arg4;
294 spin_unlock(&debug_lock);
295}
296
297static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
298{
299 return HRTIMER_NORESTART;
300}
301
302static void msm_pm_set_timer(uint32_t modified_time_us)
303{
304 u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
305 ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
306
307 lpm_hrtimer.function = lpm_hrtimer_cb;
308 hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
309}
310
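/*
 * set_l2_mode() - program the L2 SPM for the requested low power mode and
 * record the matching TZ flag (L2 on/off/GDHS) so lpm_cpu_pre_pc_cb() can
 * report it before power collapse; CTI context is saved/restored around
 * the modes that lose L2 state.
 */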
311int set_l2_mode(struct low_power_ops *ops, int mode,
312 struct lpm_cluster_level *level)
313{
314 int lpm = mode;
315 int rc = 0;
316 bool notify_rpm = level->notify_rpm;
317 struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
318 smp_processor_id())->lpm_dev;
319
320 if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
321 cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
322 coresight_cti_ctx_restore();
323
324 switch (mode) {
325 case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
326 case MSM_SPM_MODE_POWER_COLLAPSE:
327 case MSM_SPM_MODE_FASTPC:
328 if (level->no_cache_flush)
329 cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
330 else
331 cpu_ops->tz_flag = MSM_SCM_L2_OFF;
332 coresight_cti_ctx_save();
333 break;
334 case MSM_SPM_MODE_GDHS:
335 cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
336 coresight_cti_ctx_save();
337 break;
338 case MSM_SPM_MODE_CLOCK_GATING:
339 case MSM_SPM_MODE_RETENTION:
340 case MSM_SPM_MODE_DISABLED:
341 cpu_ops->tz_flag = MSM_SCM_L2_ON;
342 break;
343 default:
344 cpu_ops->tz_flag = MSM_SCM_L2_ON;
345 lpm = MSM_SPM_MODE_DISABLED;
346 break;
347 }
348
 349 if (lpm_wa_get_skip_l2_spm())
350 rc = msm_spm_config_low_power_mode_addr(ops->spm, lpm,
351 notify_rpm);
352 else
353 rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
 354
355 if (rc)
356 pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
357 __func__, lpm, rc);
358
359 return rc;
360}
361
362int set_l3_mode(struct low_power_ops *ops, int mode,
363 struct lpm_cluster_level *level)
364{
365 bool notify_rpm = level->notify_rpm;
366 struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
367 smp_processor_id())->lpm_dev;
368
369 switch (mode) {
370 case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
371 case MSM_SPM_MODE_POWER_COLLAPSE:
372 case MSM_SPM_MODE_FASTPC:
373 cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
374 break;
375 default:
376 break;
377 }
378 return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
379}
380
381
382int set_system_mode(struct low_power_ops *ops, int mode,
383 struct lpm_cluster_level *level)
384{
385 bool notify_rpm = level->notify_rpm;
386
387 return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
388}
389
390static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
391 struct lpm_cluster_level *level)
392{
393 struct low_power_ops *ops;
394
395 if (use_psci)
396 return 0;
397
398 ops = &cluster->lpm_dev[ndevice];
399 if (ops && ops->set_mode)
400 return ops->set_mode(ops, level->mode[ndevice],
401 level);
402 else
403 return -EINVAL;
404}
405
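/*
 * cpu_power_select() - pick the deepest allowed CPU idle level whose exit
 * latency fits the PM QoS budget and whose residency target fits the
 * expected sleep time; arms lpm_hrtimer when the sleep has to be cut short
 * for an earlier queued event.
 */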
406static int cpu_power_select(struct cpuidle_device *dev,
407 struct lpm_cpu *cpu)
408{
409 int best_level = 0;
410 uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
411 dev->cpu);
412 s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
413 uint32_t modified_time_us = 0;
414 uint32_t next_event_us = 0;
415 int i;
416 uint32_t lvl_latency_us = 0;
417 uint32_t *residency = get_per_cpu_max_residency(dev->cpu);
418
419 if (!cpu)
420 return best_level;
421
422 if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
423 return 0;
424
425 next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
426
427 for (i = 0; i < cpu->nlevels; i++) {
428 struct lpm_cpu_level *level = &cpu->levels[i];
429 struct power_params *pwr_params = &level->pwr;
430 uint32_t next_wakeup_us = (uint32_t)sleep_us;
431 enum msm_pm_sleep_mode mode = level->mode;
432 bool allow;
433
434 allow = lpm_cpu_mode_allow(dev->cpu, i, true);
435
436 if (!allow)
437 continue;
438
439 lvl_latency_us = pwr_params->latency_us;
440
441 if (latency_us < lvl_latency_us)
442 break;
443
444 if (next_event_us) {
445 if (next_event_us < lvl_latency_us)
446 break;
447
448 if (((next_event_us - lvl_latency_us) < sleep_us) ||
449 (next_event_us < sleep_us))
450 next_wakeup_us = next_event_us - lvl_latency_us;
451 }
452
453 best_level = i;
454
455 if (next_event_us && next_event_us < sleep_us &&
456 (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
457 modified_time_us
458 = next_event_us - lvl_latency_us;
459 else
460 modified_time_us = 0;
461
462 if (next_wakeup_us <= residency[i])
463 break;
464 }
465
466 if (modified_time_us)
467 msm_pm_set_timer(modified_time_us);
468
469 trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
470
471 return best_level;
472}
473
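/*
 * get_cluster_sleep_time() - return the time in microseconds until the
 * earliest next event among the online CPUs in this cluster's sync mask,
 * optionally reporting the CPU that owns that event via @mask. Returns
 * ~0ULL (no bound) when not called from idle.
 */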
474static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
475 struct cpumask *mask, bool from_idle)
476{
477 int cpu;
478 int next_cpu = raw_smp_processor_id();
479 ktime_t next_event;
480 struct cpumask online_cpus_in_cluster;
481
482 next_event.tv64 = KTIME_MAX;
483 if (!from_idle) {
484 if (mask)
485 cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
486 return ~0ULL;
487 }
488
489 cpumask_and(&online_cpus_in_cluster,
490 &cluster->num_children_in_sync, cpu_online_mask);
491
492 for_each_cpu(cpu, &online_cpus_in_cluster) {
493 ktime_t *next_event_c;
494
495 next_event_c = get_next_event_cpu(cpu);
496 if (next_event_c->tv64 < next_event.tv64) {
497 next_event.tv64 = next_event_c->tv64;
498 next_cpu = cpu;
499 }
500 }
501
502 if (mask)
503 cpumask_copy(mask, cpumask_of(next_cpu));
504
505
506 if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
507 return ktime_to_us(ktime_sub(next_event, ktime_get()));
508 else
509 return 0;
510}
511
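/*
 * cluster_select() - choose the deepest cluster level for which every
 * child has voted, that satisfies the aggregated PM QoS latency and the
 * expected sleep time, and, for RPM-notified levels, that the system PM
 * ops allow; returns -1 if no level qualifies.
 */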
512static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
513{
514 int best_level = -1;
515 int i;
516 struct cpumask mask;
517 uint32_t latency_us = ~0U;
518 uint32_t sleep_us;
519
520 if (!cluster)
521 return -EINVAL;
522
523 sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
524
525 if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
526 latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
527 &mask);
528
529 /*
 530 * If at least one of the cores in the cluster is online, the cluster
531 * low power modes should be determined by the idle characteristics
532 * even if the last core enters the low power mode as a part of
533 * hotplug.
534 */
535
536 if (!from_idle && num_online_cpus() > 1 &&
537 cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
538 from_idle = true;
539
540 for (i = 0; i < cluster->nlevels; i++) {
541 struct lpm_cluster_level *level = &cluster->levels[i];
542 struct power_params *pwr_params = &level->pwr;
543
544 if (!lpm_cluster_mode_allow(cluster, i, from_idle))
545 continue;
546
547 if (level->last_core_only &&
548 cpumask_weight(cpu_online_mask) > 1)
549 continue;
550
551 if (!cpumask_equal(&cluster->num_children_in_sync,
552 &level->num_cpu_votes))
553 continue;
554
555 if (from_idle && latency_us < pwr_params->latency_us)
556 break;
557
558 if (sleep_us < pwr_params->time_overhead_us)
559 break;
560
561 if (suspend_in_progress && from_idle && level->notify_rpm)
562 continue;
563
564 if (level->notify_rpm) {
565 if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
566 continue;
567 if (!sys_pm_ops->sleep_allowed())
568 continue;
569 }
570
571 best_level = i;
572
573 if (from_idle && sleep_us <= pwr_params->max_residency)
574 break;
575 }
576
577 return best_level;
578}
579
580static void cluster_notify(struct lpm_cluster *cluster,
581 struct lpm_cluster_level *level, bool enter)
582{
583 if (level->is_reset && enter)
584 cpu_cluster_pm_enter(cluster->aff_level);
585 else if (level->is_reset && !enter)
586 cpu_cluster_pm_exit(cluster->aff_level);
587}
588
589static unsigned int get_next_online_cpu(bool from_idle)
590{
591 unsigned int cpu;
592 ktime_t next_event;
593 unsigned int next_cpu = raw_smp_processor_id();
594
595 if (!from_idle)
596 return next_cpu;
597 next_event.tv64 = KTIME_MAX;
598 for_each_online_cpu(cpu) {
599 ktime_t *next_event_c;
600
601 next_event_c = get_next_event_cpu(cpu);
602 if (next_event_c->tv64 < next_event.tv64) {
603 next_event.tv64 = next_event_c->tv64;
604 next_cpu = cpu;
605 }
606 }
607 return next_cpu;
608}
609
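/*
 * cluster_configure() - program each low power device in the cluster for
 * the chosen level, notify the system PM ops for RPM-notified levels, and
 * record the level as cluster->last_level. Fails with -EPERM when the
 * children are not all in sync or an IPI is pending.
 */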
610static int cluster_configure(struct lpm_cluster *cluster, int idx,
611 bool from_idle)
612{
613 struct lpm_cluster_level *level = &cluster->levels[idx];
614 struct cpumask cpumask;
615 unsigned int cpu;
616 int ret, i;
617
618 if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
619 || is_IPI_pending(&cluster->num_children_in_sync)) {
620 return -EPERM;
621 }
622
623 if (idx != cluster->default_level) {
624 update_debug_pc_event(CLUSTER_ENTER, idx,
625 cluster->num_children_in_sync.bits[0],
626 cluster->child_cpus.bits[0], from_idle);
627 trace_cluster_enter(cluster->cluster_name, idx,
628 cluster->num_children_in_sync.bits[0],
629 cluster->child_cpus.bits[0], from_idle);
630 lpm_stats_cluster_enter(cluster->stats, idx);
631 }
632
633 for (i = 0; i < cluster->ndevices; i++) {
634 ret = set_device_mode(cluster, i, level);
635 if (ret)
636 goto failed_set_mode;
637 }
638
639 if (level->notify_rpm) {
640 struct cpumask *nextcpu;
641
642 cpu = get_next_online_cpu(from_idle);
643 cpumask_copy(&cpumask, cpumask_of(cpu));
644 nextcpu = level->disable_dynamic_routing ? NULL : &cpumask;
645
646 if (sys_pm_ops && sys_pm_ops->enter)
647 if ((sys_pm_ops->enter(nextcpu)))
648 return -EBUSY;
 649
650 if (cluster->no_saw_devices && !use_psci)
651 msm_spm_set_rpm_hs(true);
 652 }
653
 654 /* Notify cluster enter event after successful config completion */
655 cluster_notify(cluster, level, true);
656
657 cluster->last_level = idx;
658 return 0;
659
660failed_set_mode:
661
662 for (i = 0; i < cluster->ndevices; i++) {
663 int rc = 0;
664
665 level = &cluster->levels[cluster->default_level];
666 rc = set_device_mode(cluster, i, level);
667 WARN_ON(rc);
668 }
669 return ret;
670}
671
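/*
 * cluster_prepare() - called as each CPU (or child cluster) goes idle:
 * add @cpu to the cluster's sync mask and per-level votes and, once every
 * child is in sync, select and configure a cluster low power mode, then
 * recurse into the parent cluster.
 */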
672static void cluster_prepare(struct lpm_cluster *cluster,
673 const struct cpumask *cpu, int child_idx, bool from_idle,
674 int64_t start_time)
675{
676 int i;
677
678 if (!cluster)
679 return;
680
681 if (cluster->min_child_level > child_idx)
682 return;
683
684 spin_lock(&cluster->sync_lock);
685 cpumask_or(&cluster->num_children_in_sync, cpu,
686 &cluster->num_children_in_sync);
687
688 for (i = 0; i < cluster->nlevels; i++) {
689 struct lpm_cluster_level *lvl = &cluster->levels[i];
690
691 if (child_idx >= lvl->min_child_level)
692 cpumask_or(&lvl->num_cpu_votes, cpu,
693 &lvl->num_cpu_votes);
694 }
695
696 /*
 697 * cluster_select() does not make any configuration changes, so it's OK
 698 * to release the lock here. If a core wakes up for a rude request,
 699 * it need not wait for another core to finish its cluster selection
 700 * and configuration process.
701 */
702
703 if (!cpumask_equal(&cluster->num_children_in_sync,
704 &cluster->child_cpus))
705 goto failed;
706
707 i = cluster_select(cluster, from_idle);
708
709 if (i < 0)
710 goto failed;
711
712 if (cluster_configure(cluster, i, from_idle))
713 goto failed;
714
715 cluster->stats->sleep_time = start_time;
716 cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
717 from_idle, start_time);
718
719 spin_unlock(&cluster->sync_lock);
720
721 if (!use_psci) {
722 struct lpm_cluster_level *level = &cluster->levels[i];
723
724 if (level->notify_rpm)
725 if (sys_pm_ops && sys_pm_ops->update_wakeup)
726 sys_pm_ops->update_wakeup(from_idle);
727 }
728
729 return;
730failed:
731 spin_unlock(&cluster->sync_lock);
732 cluster->stats->sleep_time = 0;
733}
734
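/*
 * cluster_unprepare() - undo cluster_prepare() on wakeup: clear @cpu from
 * the sync mask and votes and, for the first CPU to wake, exit the system
 * PM state if needed, restore the default level on all devices, and
 * recurse into the parent cluster.
 */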
735static void cluster_unprepare(struct lpm_cluster *cluster,
736 const struct cpumask *cpu, int child_idx, bool from_idle,
737 int64_t end_time)
738{
739 struct lpm_cluster_level *level;
740 bool first_cpu;
741 int last_level, i, ret;
742
743 if (!cluster)
744 return;
745
746 if (cluster->min_child_level > child_idx)
747 return;
748
749 spin_lock(&cluster->sync_lock);
750 last_level = cluster->default_level;
751 first_cpu = cpumask_equal(&cluster->num_children_in_sync,
752 &cluster->child_cpus);
753 cpumask_andnot(&cluster->num_children_in_sync,
754 &cluster->num_children_in_sync, cpu);
755
756 for (i = 0; i < cluster->nlevels; i++) {
757 struct lpm_cluster_level *lvl = &cluster->levels[i];
758
759 if (child_idx >= lvl->min_child_level)
760 cpumask_andnot(&lvl->num_cpu_votes,
761 &lvl->num_cpu_votes, cpu);
762 }
763
764 if (!first_cpu || cluster->last_level == cluster->default_level)
765 goto unlock_return;
766
767 if (cluster->stats->sleep_time)
768 cluster->stats->sleep_time = end_time -
769 cluster->stats->sleep_time;
770 lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
771
772 level = &cluster->levels[cluster->last_level];
773 if (level->notify_rpm) {
774 if (sys_pm_ops && sys_pm_ops->exit)
775 sys_pm_ops->exit();
776
777 /* If RPM bumps up CX to turbo, unvote CX turbo vote
778 * during exit of rpm assisted power collapse to
779 * reduce the power impact
780 */
781 lpm_wa_cx_unvote_send();
782
 783 if (cluster->no_saw_devices && !use_psci)
784 msm_spm_set_rpm_hs(false);
785
 786 }
787
788 update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
789 cluster->num_children_in_sync.bits[0],
790 cluster->child_cpus.bits[0], from_idle);
791 trace_cluster_exit(cluster->cluster_name, cluster->last_level,
792 cluster->num_children_in_sync.bits[0],
793 cluster->child_cpus.bits[0], from_idle);
794
795 last_level = cluster->last_level;
796 cluster->last_level = cluster->default_level;
797
798 for (i = 0; i < cluster->ndevices; i++) {
799 level = &cluster->levels[cluster->default_level];
800 ret = set_device_mode(cluster, i, level);
801
802 WARN_ON(ret);
803
804 }
805
806 cluster_notify(cluster, &cluster->levels[last_level], false);
807 cluster_unprepare(cluster->parent, &cluster->child_cpus,
808 last_level, from_idle, end_time);
809unlock_return:
810 spin_unlock(&cluster->sync_lock);
811}
812
813static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
814 bool from_idle)
815{
816 struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
817 bool jtag_save_restore =
818 cluster->cpu->levels[cpu_index].jtag_save_restore;
819
820 /* Use broadcast timer for aggregating sleep mode within a cluster.
 821 * A broadcast timer could be used in the following scenarios:
 822 * 1) The architected timer HW gets reset during certain low power
 823 * modes and the core relies on an external (broadcast) timer to wake
 824 * up from sleep. This information is passed through the device tree.
 825 * 2) The CPU low power mode could trigger a system low power mode.
 826 * The low power module relies on the broadcast timer to aggregate the
 827 * next wakeup within a cluster, in which case the CPU switches over
 828 * to the broadcast timer.
829 */
830 if (from_idle && (cpu_level->use_bc_timer ||
831 (cpu_index >= cluster->min_child_level)))
832 tick_broadcast_enter();
833
834 if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
835 || (cpu_level->mode ==
836 MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
837 || (cpu_level->is_reset)))
838 cpu_pm_enter();
839
840 /*
841 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
842 */
843 if (jtag_save_restore)
844 msm_jtag_save_state();
845}
846
847static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
848 bool from_idle)
849{
850 struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
851 bool jtag_save_restore =
852 cluster->cpu->levels[cpu_index].jtag_save_restore;
853
854 if (from_idle && (cpu_level->use_bc_timer ||
855 (cpu_index >= cluster->min_child_level)))
856 tick_broadcast_exit();
857
858 if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
859 || (cpu_level->mode ==
860 MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
861 || cpu_level->is_reset))
862 cpu_pm_exit();
863
864 /*
865 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
866 */
867 if (jtag_save_restore)
868 msm_jtag_restore_state();
869}
870
871#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7)
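/*
 * get_cluster_id() - walk up the cluster hierarchy and OR together the
 * PSCI state IDs of every cluster whose children are all in sync, bumping
 * *aff_lvl for each level that is entering a non-default state.
 */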
872static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
873{
874 int state_id = 0;
875
876 if (!cluster)
877 return 0;
878
879 spin_lock(&cluster->sync_lock);
880
881 if (!cpumask_equal(&cluster->num_children_in_sync,
882 &cluster->child_cpus))
883 goto unlock_and_return;
884
885 state_id |= get_cluster_id(cluster->parent, aff_lvl);
886
887 if (cluster->last_level != cluster->default_level) {
888 struct lpm_cluster_level *level
889 = &cluster->levels[cluster->last_level];
890
891 state_id |= (level->psci_id & cluster->psci_mode_mask)
892 << cluster->psci_mode_shift;
893 (*aff_lvl)++;
894 }
895unlock_and_return:
896 spin_unlock(&cluster->sync_lock);
897 return state_id;
898}
899#endif
900
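/*
 * psci_enter_sleep() - enter the selected CPU level through PSCI. Level 0
 * is plain WFI; deeper levels build a composite state ID from the CPU and
 * cluster PSCI IDs and call arm_cpuidle_suspend(). The build picks one of
 * the variants below depending on CONFIG_CPU_V7/CONFIG_ARM_PSCI.
 */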
901#if !defined(CONFIG_CPU_V7)
902asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
903static bool psci_enter_sleep(struct lpm_cluster *cluster,
904 int idx, bool from_idle)
905
906{
907 bool ret;
908 /*
909 * idx = 0 is the default LPM state
910 */
911 if (!idx) {
912 stop_critical_timings();
913 wfi();
914 start_critical_timings();
915 ret = true;
916 } else {
917 int affinity_level = 0;
918 int state_id = get_cluster_id(cluster, &affinity_level);
919 int power_state =
920 PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
921 bool success = false;
922
923 if (cluster->cpu->levels[idx].hyp_psci) {
924 stop_critical_timings();
925 __invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
926 start_critical_timings();
927 return 1;
928 }
929
930 affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
931 state_id |= (power_state | affinity_level
932 | cluster->cpu->levels[idx].psci_id);
933
934 update_debug_pc_event(CPU_ENTER, state_id,
935 0xdeaffeed, 0xdeaffeed, true);
936 stop_critical_timings();
937 success = !arm_cpuidle_suspend(state_id);
938 start_critical_timings();
939 update_debug_pc_event(CPU_EXIT, state_id,
940 success, 0xdeaffeed, true);
941 ret = success;
942 }
943 return ret;
944}
945#elif defined(CONFIG_ARM_PSCI)
946static bool psci_enter_sleep(struct lpm_cluster *cluster,
947 int idx, bool from_idle)
948{
949 bool ret;
950
951 if (!idx) {
952 stop_critical_timings();
953 wfi();
954 start_critical_timings();
955 ret = true;
956 } else {
957 int affinity_level = 0;
958 int state_id = get_cluster_id(cluster, &affinity_level);
959 int power_state =
960 PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
961 bool success = false;
962
963 affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
964 state_id |= (power_state | affinity_level
965 | cluster->cpu->levels[idx].psci_id);
966
967 update_debug_pc_event(CPU_ENTER, state_id,
968 0xdeaffeed, 0xdeaffeed, true);
969 stop_critical_timings();
970 success = !arm_cpuidle_suspend(state_id);
971 start_critical_timings();
972 update_debug_pc_event(CPU_EXIT, state_id,
973 success, 0xdeaffeed, true);
974 ret = success;
975 }
976 return ret;
977}
978#else
979static bool psci_enter_sleep(struct lpm_cluster *cluster,
980 int idx, bool from_idle)
981{
982 WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
983 return false;
984}
985#endif
986
987static int lpm_cpuidle_select(struct cpuidle_driver *drv,
988 struct cpuidle_device *dev)
989{
990 struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
991 int idx;
992
993 if (!cluster)
994 return 0;
995
996 idx = cpu_power_select(dev, cluster->cpu);
997
998 return idx;
999}
1000
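/*
 * lpm_cpuidle_enter() - cpuidle ->enter hook: prepare the CPU and its
 * cluster hierarchy for level @idx, enter the level through SPM or PSCI,
 * then unwind the preparation and report the measured residency.
 */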
1001static int lpm_cpuidle_enter(struct cpuidle_device *dev,
1002 struct cpuidle_driver *drv, int idx)
1003{
1004 struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
1005 bool success = true;
1006 const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
1007 ktime_t start = ktime_get();
1008 int64_t start_time = ktime_to_ns(ktime_get()), end_time;
1009
1010 if (idx < 0)
1011 return -EINVAL;
1012
1013 cpu_prepare(cluster, idx, true);
1014 cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
1015
1016 trace_cpu_idle_enter(idx);
1017 lpm_stats_cpu_enter(idx, start_time);
1018
1019 if (need_resched())
1020 goto exit;
1021
1022 if (!use_psci) {
1023 if (idx > 0)
1024 update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
1025 0xdeaffeed, true);
1026 success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
1027 true);
1028
1029 if (idx > 0)
1030 update_debug_pc_event(CPU_EXIT, idx, success,
1031 0xdeaffeed, true);
1032 } else {
1033 success = psci_enter_sleep(cluster, idx, true);
1034 }
1035
1036exit:
1037 end_time = ktime_to_ns(ktime_get());
1038 lpm_stats_cpu_exit(idx, end_time, success);
1039
1040 cluster_unprepare(cluster, cpumask, idx, true, end_time);
1041 cpu_unprepare(cluster, idx, true);
1042
1043 trace_cpu_idle_exit(idx, success);
1044 dev->last_residency = ktime_us_delta(ktime_get(), start);
1045 local_irq_enable();
1046
1047 return idx;
1048}
1049
1050#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
1051static int cpuidle_register_cpu(struct cpuidle_driver *drv,
1052 struct cpumask *mask)
1053{
1054 struct cpuidle_device *device;
1055 int cpu, ret;
1056
1057
1058 if (!mask || !drv)
1059 return -EINVAL;
1060
1061 drv->cpumask = mask;
1062 ret = cpuidle_register_driver(drv);
1063 if (ret) {
1064 pr_err("Failed to register cpuidle driver %d\n", ret);
1065 goto failed_driver_register;
1066 }
1067
1068 for_each_cpu(cpu, mask) {
1069 device = &per_cpu(cpuidle_dev, cpu);
1070 device->cpu = cpu;
1071
1072 ret = cpuidle_register_device(device);
1073 if (ret) {
1074 pr_err("Failed to register cpuidle driver for cpu:%u\n",
1075 cpu);
1076 goto failed_driver_register;
1077 }
1078 }
1079 return ret;
1080failed_driver_register:
1081 for_each_cpu(cpu, mask)
1082 cpuidle_unregister_driver(drv);
1083 return ret;
1084}
1085#else
1086static int cpuidle_register_cpu(struct cpuidle_driver *drv,
1087 struct cpumask *mask)
1088{
1089 return cpuidle_register(drv, NULL);
1090}
1091#endif
1092
1093static struct cpuidle_governor lpm_governor = {
1094 .name = "qcom",
1095 .rating = 30,
1096 .select = lpm_cpuidle_select,
1097 .owner = THIS_MODULE,
1098};
1099
1100static int cluster_cpuidle_register(struct lpm_cluster *cl)
1101{
1102 int i = 0, ret = 0;
1103 unsigned int cpu;
1104 struct lpm_cluster *p = NULL;
1105
1106 if (!cl->cpu) {
1107 struct lpm_cluster *n;
1108
1109 list_for_each_entry(n, &cl->child, list) {
1110 ret = cluster_cpuidle_register(n);
1111 if (ret)
1112 break;
1113 }
1114 return ret;
1115 }
1116
1117 cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
1118 if (!cl->drv)
1119 return -ENOMEM;
1120
1121 cl->drv->name = "msm_idle";
1122
1123 for (i = 0; i < cl->cpu->nlevels; i++) {
1124 struct cpuidle_state *st = &cl->drv->states[i];
1125 struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
1126
1127 snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
1128 snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
1129 st->flags = 0;
1130 st->exit_latency = cpu_level->pwr.latency_us;
1131 st->power_usage = cpu_level->pwr.ss_power;
1132 st->target_residency = 0;
1133 st->enter = lpm_cpuidle_enter;
1134 }
1135
1136 cl->drv->state_count = cl->cpu->nlevels;
1137 cl->drv->safe_state_index = 0;
1138 for_each_cpu(cpu, &cl->child_cpus)
1139 per_cpu(cpu_cluster, cpu) = cl;
1140
1141 for_each_possible_cpu(cpu) {
1142 if (cpu_online(cpu))
1143 continue;
1144 p = per_cpu(cpu_cluster, cpu);
1145 while (p) {
1146 int j;
1147
1148 spin_lock(&p->sync_lock);
1149 cpumask_set_cpu(cpu, &p->num_children_in_sync);
1150 for (j = 0; j < p->nlevels; j++)
1151 cpumask_copy(&p->levels[j].num_cpu_votes,
1152 &p->num_children_in_sync);
1153 spin_unlock(&p->sync_lock);
1154 p = p->parent;
1155 }
1156 }
1157 ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
1158
1159 if (ret) {
1160 kfree(cl->drv);
1161 return -ENOMEM;
1162 }
1163 return 0;
1164}
1165
1166/**
1167 * init_lpm - initializes the governor
1168 */
1169static int __init init_lpm(void)
1170{
1171 return cpuidle_register_governor(&lpm_governor);
1172}
1173
1174postcore_initcall(init_lpm);
1175
1176static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
1177 struct lpm_cluster *parent)
1178{
1179 const char **level_name;
1180 int i;
1181
1182 level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);
1183
1184 if (!level_name)
1185 return;
1186
1187 for (i = 0; i < cpu->nlevels; i++)
1188 level_name[i] = cpu->levels[i].name;
1189
1190 lpm_stats_config_level("cpu", level_name, cpu->nlevels,
1191 parent->stats, &parent->child_cpus);
1192
1193 kfree(level_name);
1194}
1195
1196static void register_cluster_lpm_stats(struct lpm_cluster *cl,
1197 struct lpm_cluster *parent)
1198{
1199 const char **level_name;
1200 int i;
1201 struct lpm_cluster *child;
1202
1203 if (!cl)
1204 return;
1205
1206 level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
1207
1208 if (!level_name)
1209 return;
1210
1211 for (i = 0; i < cl->nlevels; i++)
1212 level_name[i] = cl->levels[i].level_name;
1213
1214 cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
1215 cl->nlevels, parent ? parent->stats : NULL, NULL);
1216
1217 kfree(level_name);
1218
1219 if (cl->cpu) {
1220 register_cpu_lpm_stats(cl->cpu, cl);
1221 return;
1222 }
1223
1224 list_for_each_entry(child, &cl->child, list)
1225 register_cluster_lpm_stats(child, cl);
1226}
1227
1228static int lpm_suspend_prepare(void)
1229{
1230 suspend_in_progress = true;
1231 lpm_stats_suspend_enter();
1232
1233 return 0;
1234}
1235
1236static void lpm_suspend_wake(void)
1237{
1238 suspend_in_progress = false;
1239 lpm_stats_suspend_exit();
1240}
1241
1242static int lpm_suspend_enter(suspend_state_t state)
1243{
1244 int cpu = raw_smp_processor_id();
1245 struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
1246 struct lpm_cpu *lpm_cpu = cluster->cpu;
1247 const struct cpumask *cpumask = get_cpu_mask(cpu);
1248 int idx;
1249
1250 for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
1251
1252 if (lpm_cpu_mode_allow(cpu, idx, false))
1253 break;
1254 }
1255 if (idx < 0) {
1256 pr_err("Failed suspend\n");
1257 return 0;
1258 }
1259 cpu_prepare(cluster, idx, false);
1260 cluster_prepare(cluster, cpumask, idx, false, 0);
1261 if (idx > 0)
1262 update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
1263 0xdeaffeed, false);
1264
1265 /*
 1266 * Print the clocks which are enabled during system suspend.
 1267 * This debug information is useful to know which clocks are
 1268 * enabled and are preventing the system level LPMs (XO and
 1269 * Vmin).
1270 */
1271 clock_debug_print_enabled(true);
1272
1273 if (!use_psci)
1274 msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
1275 else
1276 psci_enter_sleep(cluster, idx, true);
1277
1278 if (idx > 0)
1279 update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
1280 false);
1281
1282 cluster_unprepare(cluster, cpumask, idx, false, 0);
1283 cpu_unprepare(cluster, idx, false);
1284 return 0;
1285}
1286
1287static int lpm_dying_cpu(unsigned int cpu)
1288{
1289 struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
1290
1291 update_debug_pc_event(CPU_HP_DYING, cpu,
1292 cluster->num_children_in_sync.bits[0],
1293 cluster->child_cpus.bits[0], false);
1294 cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
1295 return 0;
1296}
1297
1298static int lpm_starting_cpu(unsigned int cpu)
1299{
1300 struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
1301
1302 update_debug_pc_event(CPU_HP_STARTING, cpu,
1303 cluster->num_children_in_sync.bits[0],
1304 cluster->child_cpus.bits[0], false);
1305 cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
1306 return 0;
1307}
1308
1309static const struct platform_suspend_ops lpm_suspend_ops = {
1310 .enter = lpm_suspend_enter,
1311 .valid = suspend_valid_only_mem,
1312 .prepare_late = lpm_suspend_prepare,
1313 .wake = lpm_suspend_wake,
1314};
1315
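/*
 * lpm_probe() - parse the lpm-levels device tree hierarchy, set up the
 * suspend ops, debug buffer and hotplug callbacks, and register each CPU
 * cluster with the cpuidle framework and sysfs.
 */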
1316static int lpm_probe(struct platform_device *pdev)
1317{
1318 int ret;
1319 int size;
1320 struct kobject *module_kobj = NULL;
1321 struct md_region md_entry;
1322
1323 get_online_cpus();
1324 lpm_root_node = lpm_of_parse_cluster(pdev);
1325
1326 if (IS_ERR_OR_NULL(lpm_root_node)) {
1327 pr_err("%s(): Failed to probe low power modes\n", __func__);
1328 put_online_cpus();
1329 return PTR_ERR(lpm_root_node);
1330 }
1331
1332 if (print_parsed_dt)
1333 cluster_dt_walkthrough(lpm_root_node);
1334
1335 /*
 1336 * Register the hotplug notifier before the broadcast timer is set up
 1337 * to prevent a race where a broadcast timer might not be set up for a
 1338 * core. This is a bug in the existing code, but no issues have been
 1339 * observed, possibly because of how late lpm_levels gets initialized.
1340 */
1341 suspend_set_ops(&lpm_suspend_ops);
1342 hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1343
1344 ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
1345 if (ret) {
1346 pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
1347 __func__, ret);
1348 put_online_cpus();
1349 return ret;
1350 }
1351 size = num_dbg_elements * sizeof(struct lpm_debug);
1352 lpm_debug = dma_alloc_coherent(&pdev->dev, size,
1353 &lpm_debug_phys, GFP_KERNEL);
1354 register_cluster_lpm_stats(lpm_root_node, NULL);
1355
1356 ret = cluster_cpuidle_register(lpm_root_node);
1357 put_online_cpus();
1358 if (ret) {
 1359 pr_err("%s(): Failed to register with cpuidle framework\n",
1360 __func__);
1361 goto failed;
1362 }
1363 ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
1364 "AP_QCOM_SLEEP_STARTING",
1365 lpm_starting_cpu, lpm_dying_cpu);
1366 if (ret)
1367 goto failed;
1368
1369 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
1370 if (!module_kobj) {
1371 pr_err("%s: cannot find kobject for module %s\n",
1372 __func__, KBUILD_MODNAME);
1373 ret = -ENOENT;
1374 goto failed;
1375 }
1376
1377 ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
1378 if (ret) {
1379 pr_err("%s(): Failed to create cluster level nodes\n",
1380 __func__);
1381 goto failed;
1382 }
1383
 1384 /* Add lpm_debug to Minidump */
1385 strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
1386 md_entry.virt_addr = (uintptr_t)lpm_debug;
1387 md_entry.phys_addr = lpm_debug_phys;
1388 md_entry.size = size;
1389 if (msm_minidump_add_region(&md_entry))
1390 pr_info("Failed to add lpm_debug in Minidump\n");
1391
1392 return 0;
1393failed:
1394 free_cluster_node(lpm_root_node);
1395 lpm_root_node = NULL;
1396 return ret;
1397}
1398
1399static const struct of_device_id lpm_mtch_tbl[] = {
1400 {.compatible = "qcom,lpm-levels"},
1401 {},
1402};
1403
1404static struct platform_driver lpm_driver = {
1405 .probe = lpm_probe,
1406 .driver = {
1407 .name = "lpm-levels",
1408 .owner = THIS_MODULE,
1409 .of_match_table = lpm_mtch_tbl,
1410 },
1411};
1412
1413static int __init lpm_levels_module_init(void)
1414{
1415 int rc;
1416
1417 rc = platform_driver_register(&lpm_driver);
1418 if (rc) {
1419 pr_info("Error registering %s\n", lpm_driver.driver.name);
1420 goto fail;
1421 }
1422
1423fail:
1424 return rc;
1425}
1426late_initcall(lpm_levels_module_init);
1427
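/*
 * lpm_cpu_pre_pc_cb() - called just before a CPU power collapses; returns
 * the L2 flag to hand to the secure monitor and takes the scm_handoff
 * remote spinlock (released by the secure monitor) on the way out.
 */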
1428enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
1429{
1430 struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
1431 enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
1432
1433 /*
 1434 * No need to acquire the lock if the probe isn't completed yet.
 1435 * In the event of a hotplug happening before the lpm probe, we want
 1436 * to flush the cache to make sure that L2 is flushed; otherwise this
 1437 * could cause incoherencies for a cluster architecture. This wouldn't
 1438 * affect the idle case, as the idle driver wouldn't be registered
 1439 * before the probe function completes.
1440 */
1441 if (!cluster)
1442 return MSM_SCM_L2_OFF;
1443
1444 /*
 1445 * Assumes L2 only. What/how parameters get passed into TZ will
 1446 * determine how this function reports this info back in msm-pm.c.
1447 */
1448 spin_lock(&cluster->sync_lock);
1449
1450 if (!cluster->lpm_dev) {
1451 retflag = MSM_SCM_L2_OFF;
1452 goto unlock_and_return;
1453 }
1454
1455 if (!cpumask_equal(&cluster->num_children_in_sync,
1456 &cluster->child_cpus))
1457 goto unlock_and_return;
1458
1459 if (cluster->lpm_dev)
1460 retflag = cluster->lpm_dev->tz_flag;
1461 /*
 1462 * The scm_handoff_lock will be released by the secure monitor.
1463 * It is used to serialize power-collapses from this point on,
1464 * so that both Linux and the secure context have a consistent
1465 * view regarding the number of running cpus (cpu_count).
1466 *
1467 * It must be acquired before releasing the cluster lock.
1468 */
1469unlock_and_return:
1470 update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
1471 0xdeadbeef);
1472 trace_pre_pc_cb(retflag);
1473 remote_spin_lock_rlock_id(&scm_handoff_lock,
1474 REMOTE_SPINLOCK_TID_START + cpu);
1475 spin_unlock(&cluster->sync_lock);
1476 return retflag;
1477}
1478
1479/**
1480 * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
1481 *
1482 * @cpu: cpuid of the dying CPU
1483 *
1484 * Called from platform_cpu_kill() to terminate hotplug in a low power mode
1485 */
1486void lpm_cpu_hotplug_enter(unsigned int cpu)
1487{
1488 enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
1489 struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
1490 int i;
1491 int idx = -1;
1492
1493 /*
 1494 * If lpm isn't probed yet, try to put the cpu into one of the
 1495 * available modes.
1496 */
1497 if (!cluster) {
1498 if (msm_spm_is_mode_avail(
1499 MSM_SPM_MODE_POWER_COLLAPSE)){
1500 mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
1501 } else if (msm_spm_is_mode_avail(
1502 MSM_SPM_MODE_FASTPC)) {
1503 mode = MSM_PM_SLEEP_MODE_FASTPC;
1504 } else if (msm_spm_is_mode_avail(
1505 MSM_SPM_MODE_RETENTION)) {
1506 mode = MSM_PM_SLEEP_MODE_RETENTION;
1507 } else {
1508 pr_err("No mode avail for cpu%d hotplug\n", cpu);
1509 WARN_ON(1);
1510 return;
1511 }
1512 } else {
1513 struct lpm_cpu *lpm_cpu;
1514 uint32_t ss_pwr = ~0U;
1515
1516 lpm_cpu = cluster->cpu;
1517 for (i = 0; i < lpm_cpu->nlevels; i++) {
1518 if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power)
1519 continue;
1520 ss_pwr = lpm_cpu->levels[i].pwr.ss_power;
1521 idx = i;
1522 mode = lpm_cpu->levels[i].mode;
1523 }
1524
1525 if (mode == MSM_PM_SLEEP_MODE_NR)
1526 return;
1527
1528 WARN_ON(idx < 0);
1529 cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0);
1530 }
1531
1532 msm_cpu_pm_enter_sleep(mode, false);
1533}