/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/pm_qos.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <linux/remote_spinlock.h>
#include <linux/msm_remote_spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/coresight-cti.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/cpu_pm.h>
#include <linux/cpuhotplug.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/jtag.h>
#include <soc/qcom/system_pm.h>
#include <asm/cputype.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
#include "lpm-levels.h"
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>

#define SCLK_HZ (32768)
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)

enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

enum debug_event {
	CPU_ENTER,
	CPU_EXIT,
	CLUSTER_ENTER,
	CLUSTER_EXIT,
	CPU_HP_STARTING,
	CPU_HP_DYING,
};

struct lpm_debug {
	cycle_t time;
	enum debug_event evt;
	int cpu;
	uint32_t arg1;
	uint32_t arg2;
	uint32_t arg3;
	uint32_t arg4;
};

struct lpm_cluster *lpm_root_node;

#define MAXSAMPLES 5

static bool lpm_prediction = true;
module_param_named(lpm_prediction, lpm_prediction, bool, 0664);

static uint32_t ref_stddev = 100;
module_param_named(ref_stddev, ref_stddev, uint, 0664);

static uint32_t tmr_add = 100;
module_param_named(tmr_add, tmr_add, uint, 0664);

static uint32_t bias_hyst;
module_param_named(bias_hyst, bias_hyst, uint, 0664);

struct lpm_history {
	uint32_t resi[MAXSAMPLES];
	int mode[MAXSAMPLES];
	int nsamp;
	uint32_t hptr;
	uint32_t hinvalid;
	uint32_t htmr_wkup;
	int64_t stime;
};

static DEFINE_PER_CPU(struct lpm_history, hist);

static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct hrtimer histtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);

static bool menu_select;
module_param_named(menu_select, menu_select, bool, 0664);

static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, 0664);
static uint64_t suspend_wake_time;

static bool print_parsed_dt;
module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664);

static bool sleep_disabled;
module_param_named(sleep_disabled, sleep_disabled, bool, 0664);

/**
 * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
 *
 * Returns an s32 latency value
 */
s32 msm_cpuidle_get_deep_idle_latency(void)
{
	return 10;
}
EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);

void lpm_suspend_wake_time(uint64_t wakeup_time)
{
	if (wakeup_time <= 0) {
		suspend_wake_time = msm_pm_sleep_time_override;
		return;
	}

	if (msm_pm_sleep_time_override &&
		(msm_pm_sleep_time_override < wakeup_time))
		suspend_wake_time = msm_pm_sleep_time_override;
	else
		suspend_wake_time = wakeup_time;
}
EXPORT_SYMBOL(lpm_suspend_wake_time);

static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
					struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cluster_level *level;
	struct lpm_cluster *n;
	struct power_params *pwr_params;
	uint32_t latency = 0;
	int i;

	if (!cluster->list.next) {
		for (i = 0; i < cluster->nlevels; i++) {
			level = &cluster->levels[i];
			pwr_params = &level->pwr;
			if (lat_level->reset_level == level->reset_level) {
				if ((latency > pwr_params->latency_us)
						|| (!latency))
					latency = pwr_params->latency_us;
				break;
			}
		}
	} else {
		list_for_each(list, &cluster->parent->child) {
			n = list_entry(list, typeof(*n), list);
			if (lat_level->level_name) {
				if (strcmp(lat_level->level_name,
						n->cluster_name))
					continue;
			}
			for (i = 0; i < n->nlevels; i++) {
				level = &n->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level ==
						level->reset_level) {
					if ((latency > pwr_params->latency_us)
							|| (!latency))
						latency =
						pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return latency;
}

static uint32_t least_cpu_latency(struct list_head *child,
				struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cpu_level *level;
	struct power_params *pwr_params;
	struct lpm_cpu *cpu;
	struct lpm_cluster *n;
	uint32_t lat = 0;
	int i;

	list_for_each(list, child) {
		n = list_entry(list, typeof(*n), list);
		if (lat_level->level_name) {
			if (strcmp(lat_level->level_name, n->cluster_name))
				continue;
		}
		list_for_each_entry(cpu, &n->cpu, list) {
			for (i = 0; i < cpu->nlevels; i++) {
				level = &cpu->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level
						== level->reset_level) {
					if ((lat > pwr_params->latency_us)
							|| (!lat))
						lat = pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return lat;
}

static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
						int affinity_level)
{
	struct lpm_cluster *n;

	if ((cluster->aff_level == affinity_level)
		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
		return cluster;
	else if (list_empty(&cluster->cpu)) {
		n = list_entry(cluster->child.next, typeof(*n), list);
		return cluster_aff_match(n, affinity_level);
	} else
		return NULL;
}

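/*
 * lpm_get_latency() - query the lowest latency for a given affinity level
 * @level: requested affinity level and reset level to match
 * @latency: out parameter for the lowest matching latency in microseconds
 *
 * Walks the LPM hierarchy for the cluster that matches the requested
 * affinity level and reports the smallest latency among levels with the
 * requested reset level. Returns -EAGAIN before lpm_probe() completes and
 * -EINVAL for bad arguments or when no matching mode is found.
 */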
int lpm_get_latency(struct latency_level *level, uint32_t *latency)
{
	struct lpm_cluster *cluster;
	uint32_t val;

	if (!lpm_root_node) {
		pr_err("%s: lpm_probe not completed\n", __func__);
		return -EAGAIN;
	}

	if ((level->affinity_level < 0)
		|| (level->affinity_level > lpm_root_node->aff_level)
		|| (level->reset_level < LPM_RESET_LVL_RET)
		|| (level->reset_level > LPM_RESET_LVL_PC)
		|| !latency)
		return -EINVAL;

	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
	if (!cluster) {
		pr_err("%s:No matching cluster found for affinity_level:%d\n",
				__func__, level->affinity_level);
		return -EINVAL;
	}

	if (level->affinity_level == 0)
		val = least_cpu_latency(&cluster->parent->child, level);
	else
		val = least_cluster_latency(cluster, level);

	if (!val) {
		pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
			__func__, level->affinity_level, level->reset_level);
		return -EINVAL;
	}

	*latency = val;

	return 0;
}
EXPORT_SYMBOL(lpm_get_latency);

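/*
 * update_debug_pc_event() - record an LPM event in the lpm_debug ring buffer
 *
 * Stores the event type, timestamp (arch counter), CPU and four arguments in
 * a num_dbg_elements deep circular buffer that can be inspected from memory
 * dumps. Does nothing until the buffer has been allocated in lpm_probe().
 */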
static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
		uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
	struct lpm_debug *dbg;
	int idx;
	static DEFINE_SPINLOCK(debug_lock);
	static int pc_event_index;

	if (!lpm_debug)
		return;

	spin_lock(&debug_lock);
	idx = pc_event_index++;
	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];

	dbg->evt = event;
	dbg->time = arch_counter_get_cntvct();
	dbg->cpu = raw_smp_processor_id();
	dbg->arg1 = arg1;
	dbg->arg2 = arg2;
	dbg->arg3 = arg3;
	dbg->arg4 = arg4;
	spin_unlock(&debug_lock);
}

static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_DYING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_STARTING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
{
	return HRTIMER_NORESTART;
}

static void histtimer_cancel(void)
{
	hrtimer_try_to_cancel(&histtimer);
}

static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
{
	int cpu = raw_smp_processor_id();
	struct lpm_history *history = &per_cpu(hist, cpu);

	history->hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void histtimer_start(uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t hist_ktime = ns_to_ktime(time_ns);

	histtimer.function = histtimer_fn;
	hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}

static void cluster_timer_init(struct lpm_cluster *cluster)
{
	struct list_head *list;

	if (!cluster)
		return;

	hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		cluster_timer_init(n);
	}
}

static void clusttimer_cancel(void)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	hrtimer_try_to_cancel(&cluster->histtimer);

	if (cluster->parent)
		hrtimer_try_to_cancel(&cluster->parent->histtimer);
}

static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
{
	struct lpm_cluster *cluster = container_of(h,
				struct lpm_cluster, histtimer);

	cluster->history.hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t clust_ktime = ns_to_ktime(time_ns);

	cluster->histtimer.function = clusttimer_fn;
	hrtimer_start(&cluster->histtimer, clust_ktime,
				HRTIMER_MODE_REL_PINNED);
}

static void msm_pm_set_timer(uint32_t modified_time_us)
{
	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);

	lpm_hrtimer.function = lpm_hrtimer_cb;
	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

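/*
 * lpm_cpuidle_predict() - estimate the next sleep length from recent history
 *
 * Uses the last MAXSAMPLES observed residencies for this CPU. If the samples
 * have a low standard deviation (relative to ref_stddev), their average is
 * returned as the predicted sleep time. Otherwise, if a mode shows mostly
 * premature exits, *idx_restrict and *idx_restrict_time are set so the
 * caller avoids that mode and deeper ones. Returns 0 when no prediction can
 * be made.
 */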
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
		struct lpm_cpu *cpu, int *idx_restrict,
		uint32_t *idx_restrict_time)
{
	int i, j, divisor;
	uint64_t max, avg, stddev;
	int64_t thresh = LLONG_MAX;
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);

	if (!lpm_prediction)
		return 0;

	/*
	 * Samples are marked invalid when woken up due to a timer,
	 * so do not predict.
	 */
	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
		return 0;
	}

	/*
	 * Predict only when all the samples are collected.
	 */
	if (history->nsamp < MAXSAMPLES) {
		history->stime = 0;
		return 0;
	}

	/*
	 * If the samples do not deviate much, use their average as the
	 * predicted sleep time. Otherwise, if any specific mode shows more
	 * premature exits, return the index of that mode.
	 */

again:
	max = avg = divisor = stddev = 0;
	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			int64_t diff = value - avg;

			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);

	/*
	 * If the deviation is small, return the average; otherwise drop the
	 * largest sample and retry.
	 */
	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
					|| stddev <= ref_stddev) {
		history->stime = ktime_to_us(ktime_get()) + avg;
		return avg;
	} else if (divisor > (MAXSAMPLES - 1)) {
		thresh = max - 1;
		goto again;
	}

	/*
	 * Count the premature exits for each mode other than clock gating;
	 * if more than fifty percent of the samples for a mode exited early,
	 * restrict that mode and all deeper modes.
	 */
	if (history->htmr_wkup != 1) {
		for (j = 1; j < cpu->nlevels; j++) {
			uint32_t failed = 0;
			uint64_t total = 0;

			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) &&
					(history->resi[i] < min_residency[j])) {
					failed++;
					total += history->resi[i];
				}
			}
			if (failed > (MAXSAMPLES/2)) {
				*idx_restrict = j;
				do_div(total, failed);
				*idx_restrict_time = total;
				history->stime = ktime_to_us(ktime_get())
						+ *idx_restrict_time;
				break;
			}
		}
	}
	return 0;
}

static inline void invalidate_predict_history(struct cpuidle_device *dev)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);

	if (!lpm_prediction)
		return;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
	}
}

static void clear_predict_history(void)
{
	struct lpm_history *history;
	int i;
	unsigned int cpu;

	if (!lpm_prediction)
		return;

	for_each_possible_cpu(cpu) {
		history = &per_cpu(hist, cpu);
		for (i = 0; i < MAXSAMPLES; i++) {
			history->resi[i] = 0;
			history->mode[i] = -1;
			history->hptr = 0;
			history->nsamp = 0;
			history->stime = 0;
		}
	}
}

static void update_history(struct cpuidle_device *dev, int idx);

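/*
 * is_cpu_biased() - check whether the scheduler recently placed work on @cpu
 *
 * Returns true if the CPU was busy within the last BIAS_HYST nanoseconds, in
 * which case cpu_power_select() keeps the CPU in the shallowest state.
 */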
static inline bool is_cpu_biased(int cpu)
{
	u64 now = sched_clock();
	u64 last = sched_get_cpu_last_busy_time(cpu);

	if (!last)
		return false;

	return (now - last) < BIAS_HYST;
}

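/*
 * cpu_power_select() - pick the deepest allowed CPU low power mode
 *
 * Chooses the deepest level whose latency fits within the PM QoS request and
 * whose residency fits the expected sleep time (tick-based or predicted).
 * May program a wakeup hrtimer when the idle window must be shortened, and a
 * history timer to get out of a possibly mispredicted shallow state.
 */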
static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i, idx_restrict;
	uint32_t lvl_latency_us = 0;
	uint64_t predicted = 0;
	uint32_t htime = 0, idx_restrict_time = 0;
	uint32_t next_wakeup_us = (uint32_t)sleep_us;
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);

	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
		return 0;

	idx_restrict = cpu->nlevels + 1;

	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

	if (is_cpu_biased(dev->cpu)) {
		best_level = 0;
		goto done_select;
	}

	for (i = 0; i < cpu->nlevels; i++) {
		struct lpm_cpu_level *level = &cpu->levels[i];
		struct power_params *pwr_params = &level->pwr;
		enum msm_pm_sleep_mode mode = level->mode;
		bool allow;

		allow = lpm_cpu_mode_allow(dev->cpu, i, true);

		if (!allow)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		if (latency_us < lvl_latency_us)
			break;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
				break;

			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		if (!i) {
			/*
			 * If next_wakeup_us itself is not long enough for low
			 * power modes deeper than clock gating, do not call
			 * prediction.
			 */
			if (next_wakeup_us > max_residency[i]) {
				predicted = lpm_cpuidle_predict(dev, cpu,
					&idx_restrict, &idx_restrict_time);
				if (predicted && (predicted < min_residency[i]))
					predicted = min_residency[i];
			} else
				invalidate_predict_history(dev);
		}

		if (i >= idx_restrict)
			break;

		best_level = i;

		if (next_event_us && next_event_us < sleep_us &&
				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
			modified_time_us
				= next_event_us - lvl_latency_us;
		else
			modified_time_us = 0;

		if (predicted ? (predicted <= max_residency[i])
			: (next_wakeup_us <= max_residency[i]))
			break;
	}

	if (modified_time_us)
		msm_pm_set_timer(modified_time_us);

	/*
	 * Start a timer to avoid staying in a shallower mode forever in case
	 * of misprediction.
	 */
	if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
			&& ((best_level >= 0)
			&& (best_level < (cpu->nlevels-1)))) {
		htime = predicted + tmr_add;
		if (htime == tmr_add)
			htime = idx_restrict_time;
		else if (htime > max_residency[best_level])
			htime = max_residency[best_level];

		if ((next_wakeup_us > htime) &&
			((next_wakeup_us - htime) > max_residency[best_level]))
			histtimer_start(htime);
	}

done_select:
	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);

	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
			predicted, htime);

	return best_level;
}

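/*
 * get_cluster_sleep_time() - time until the next wakeup within a cluster
 *
 * For idle, returns the shortest time (in microseconds) until any online CPU
 * in the cluster has its next timer event, and optionally reports the
 * earliest per-CPU prediction via @pred_time and the wakeup CPU via @mask.
 * For suspend, returns the configured wakeup override converted to
 * microseconds, or effectively no limit when none is set.
 */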
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
		struct cpumask *mask, bool from_idle, uint32_t *pred_time)
{
	int cpu;
	int next_cpu = raw_smp_processor_id();
	ktime_t next_event;
	struct cpumask online_cpus_in_cluster;
	struct lpm_history *history;
	int64_t prediction = LONG_MAX;

	next_event.tv64 = KTIME_MAX;
	if (!suspend_wake_time)
		suspend_wake_time = msm_pm_sleep_time_override;
	if (!from_idle) {
		if (mask)
			cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
		if (!suspend_wake_time)
			return ~0ULL;
		else
			return USEC_PER_SEC * suspend_wake_time;
	}

	cpumask_and(&online_cpus_in_cluster,
			&cluster->num_children_in_sync, cpu_online_mask);

	for_each_cpu(cpu, &online_cpus_in_cluster) {
		ktime_t *next_event_c;

		next_event_c = get_next_event_cpu(cpu);
		if (next_event_c->tv64 < next_event.tv64) {
			next_event.tv64 = next_event_c->tv64;
			next_cpu = cpu;
		}

		if (from_idle && lpm_prediction) {
			history = &per_cpu(hist, cpu);
			if (history->stime && (history->stime < prediction))
				prediction = history->stime;
		}
	}

	if (mask)
		cpumask_copy(mask, cpumask_of(next_cpu));

	if (from_idle && lpm_prediction) {
		if (prediction > ktime_to_us(ktime_get()))
			*pred_time = prediction - ktime_to_us(ktime_get());
	}

	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
		return ktime_to_us(ktime_sub(next_event, ktime_get()));
	else
		return 0;
}

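/*
 * cluster_predict() - predict cluster sleep time from cluster-level history
 *
 * Returns 0 when no prediction is possible, 1 when one of the modes shows
 * mostly premature exits (the average of those residencies is returned in
 * @pred_us), and 2 when an earlier call already flagged premature exits and
 * the average of all recorded residencies is returned instead.
 */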
static int cluster_predict(struct lpm_cluster *cluster,
				uint32_t *pred_us)
{
	int i, j;
	int ret = 0;
	struct cluster_history *history = &cluster->history;
	int64_t cur_time = ktime_to_us(ktime_get());

	if (!lpm_prediction)
		return 0;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->flag = 0;
		return ret;
	}

	if (history->nsamp == MAXSAMPLES) {
		for (i = 0; i < MAXSAMPLES; i++) {
			if ((cur_time - history->stime[i])
					> CLUST_SMPL_INVLD_TIME)
				history->nsamp--;
		}
	}

	if (history->nsamp < MAXSAMPLES) {
		history->flag = 0;
		return ret;
	}

	if (history->flag == 2)
		history->flag = 0;

	if (history->htmr_wkup != 1) {
		uint64_t total = 0;

		if (history->flag == 1) {
			for (i = 0; i < MAXSAMPLES; i++)
				total += history->resi[i];
			do_div(total, MAXSAMPLES);
			*pred_us = total;
			return 2;
		}

		for (j = 1; j < cluster->nlevels; j++) {
			uint32_t failed = 0;

			total = 0;
			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) && (history->resi[i]
				< cluster->levels[j].pwr.min_residency)) {
					failed++;
					total += history->resi[i];
				}
			}

			if (failed > (MAXSAMPLES-2)) {
				do_div(total, failed);
				*pred_us = total;
				history->flag = 1;
				return 1;
			}
		}
	}

	return ret;
}

static void update_cluster_history_time(struct cluster_history *history,
						int idx, uint64_t start)
{
	history->entry_idx = idx;
	history->entry_time = start;
}

static void update_cluster_history(struct cluster_history *history, int idx)
{
	uint32_t tmr = 0;
	uint32_t residency = 0;
	struct lpm_cluster *cluster =
			container_of(history, struct lpm_cluster, history);

	if (!lpm_prediction)
		return;

	if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
		residency = ktime_to_us(ktime_get()) - history->entry_time;
		history->stime[history->hptr] = history->entry_time;
	} else
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += residency;

		history->htmr_wkup = 0;
		tmr = 1;
	} else {
		history->resi[history->hptr] = residency;
	}

	history->mode[history->hptr] = idx;

	history->entry_idx = INT_MIN;
	history->entry_time = 0;

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	trace_cluster_pred_hist(cluster->cluster_name,
		history->mode[history->hptr], history->resi[history->hptr],
		history->hptr, tmr);

	(history->hptr)++;

	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

static void clear_cl_history_each(struct cluster_history *history)
{
	int i;

	for (i = 0; i < MAXSAMPLES; i++) {
		history->resi[i] = 0;
		history->mode[i] = -1;
		history->stime[i] = 0;
	}
	history->hptr = 0;
	history->nsamp = 0;
	history->flag = 0;
	history->hinvalid = 0;
	history->htmr_wkup = 0;
}

static void clear_cl_predict_history(void)
{
	struct lpm_cluster *cluster = lpm_root_node;
	struct list_head *list;

	if (!lpm_prediction)
		return;

	clear_cl_history_each(&cluster->history);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		clear_cl_history_each(&n->history);
	}
}

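/*
 * cluster_select() - choose the deepest allowed cluster low power mode
 *
 * A level is eligible only when every child has voted for it, its latency
 * fits the aggregated PM QoS request and its overhead fits the expected
 * cluster sleep (or predicted) time. Returns the chosen level index, or a
 * negative value when no level can be used; *ispred reports whether a
 * prediction drove the choice.
 */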
static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
							int *ispred)
{
	int best_level = -1;
	int i;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;
	uint32_t cpupred_us = 0, pred_us = 0;
	int pred_mode = 0, predicted = 0;

	if (!cluster)
		return -EINVAL;

	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
						from_idle, &cpupred_us);

	if (from_idle) {
		pred_mode = cluster_predict(cluster, &pred_us);

		if (cpupred_us && pred_mode && (cpupred_us < pred_us))
			pred_us = cpupred_us;

		if (pred_us && pred_mode && (pred_us < sleep_us))
			predicted = 1;

		if (predicted && (pred_us == cpupred_us))
			predicted = 2;
	}

	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
							&mask);

	/*
	 * If at least one of the cores in the cluster is online, the cluster
	 * low power modes should be determined by the idle characteristics
	 * even if the last core enters the low power mode as a part of
	 * hotplug.
	 */

	if (!from_idle && num_online_cpus() > 1 &&
		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
		from_idle = true;

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *level = &cluster->levels[i];
		struct power_params *pwr_params = &level->pwr;

		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
			continue;

		if (!cpumask_equal(&cluster->num_children_in_sync,
					&level->num_cpu_votes))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			break;

		if (sleep_us < pwr_params->time_overhead_us)
			break;

		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		best_level = i;

		if (from_idle &&
			(predicted ? (pred_us <= pwr_params->max_residency)
			: (sleep_us <= pwr_params->max_residency)))
			break;
	}

	if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
		cluster->history.flag = 2;

	*ispred = predicted;

	trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
					latency_us, predicted, pred_us);

	return best_level;
}

static void cluster_notify(struct lpm_cluster *cluster,
		struct lpm_cluster_level *level, bool enter)
{
	if (level->is_reset && enter)
		cpu_cluster_pm_enter(cluster->aff_level);
	else if (level->is_reset && !enter)
		cpu_cluster_pm_exit(cluster->aff_level);
}

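/*
 * cluster_configure() - commit a cluster to the selected low power level
 *
 * Fails with -EPERM when the child CPUs are not all in sync or an IPI is
 * pending. For levels that notify the RPM, clears the prediction history and
 * votes the expected sleep duration through system_sleep_enter(). Also arms
 * the cluster history timer when the level was chosen from a prediction.
 */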
static int cluster_configure(struct lpm_cluster *cluster, int idx,
		bool from_idle, int predicted)
{
	struct lpm_cluster_level *level = &cluster->levels[idx];

	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
			|| is_IPI_pending(&cluster->num_children_in_sync)) {
		return -EPERM;
	}

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		lpm_stats_cluster_enter(cluster->stats, idx);

		if (from_idle && lpm_prediction)
			update_cluster_history_time(&cluster->history, idx,
						ktime_to_us(ktime_get()));
	}

	if (level->notify_rpm) {
		uint64_t us;
		uint32_t pred_us;

		us = get_cluster_sleep_time(cluster, NULL, from_idle,
							&pred_us);
		us = us + 1;
		clear_predict_history();
		clear_cl_predict_history();

		system_sleep_enter(us);
	}
	/* Notify the cluster enter event after successful configuration */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;

	if (predicted && (idx < (cluster->nlevels - 1))) {
		struct power_params *pwr_params = &cluster->levels[idx].pwr;

		clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
	}

	return 0;
}

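/*
 * cluster_prepare() - vote this CPU (or child cluster) into a cluster level
 *
 * Records the caller in num_children_in_sync and in the per-level vote
 * masks; once every child CPU has arrived, selects and configures a cluster
 * level and recursively prepares the parent cluster.
 */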
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t start_time)
{
	int i;
	int predicted = 0;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	cpumask_or(&cluster->num_children_in_sync, cpu,
			&cluster->num_children_in_sync);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_or(&lvl->num_cpu_votes, cpu,
					&lvl->num_cpu_votes);
	}

	/*
	 * cluster_select() does not make any configuration changes, so it is
	 * OK to release the lock here. If a core wakes up for a rude request,
	 * it need not wait for another to finish its cluster selection and
	 * configuration process.
	 */

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto failed;

	i = cluster_select(cluster, from_idle, &predicted);

	if (((i < 0) || (i == cluster->default_level))
				&& predicted && from_idle) {
		update_cluster_history_time(&cluster->history,
					-1, ktime_to_us(ktime_get()));

		if (i < 0) {
			struct power_params *pwr_params =
						&cluster->levels[0].pwr;

			clusttimer_start(cluster,
					pwr_params->max_residency + tmr_add);
		}
	}

	if (i < 0)
		goto failed;

	if (cluster_configure(cluster, i, from_idle, predicted))
		goto failed;

	cluster->stats->sleep_time = start_time;
	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
			from_idle, start_time);

	spin_unlock(&cluster->sync_lock);
	return;
failed:
	spin_unlock(&cluster->sync_lock);
	cluster->stats->sleep_time = 0;
}

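/*
 * cluster_unprepare() - remove a CPU's vote when it leaves a low power mode
 *
 * The first CPU to wake up tears down the cluster state: it updates the
 * sleep statistics and history, calls system_sleep_exit() for RPM-notified
 * levels, restores the default level and recursively unprepares the parent.
 */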
static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t end_time)
{
	struct lpm_cluster_level *level;
	bool first_cpu;
	int last_level, i;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	last_level = cluster->default_level;
	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus);
	cpumask_andnot(&cluster->num_children_in_sync,
			&cluster->num_children_in_sync, cpu);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_andnot(&lvl->num_cpu_votes,
					&lvl->num_cpu_votes, cpu);
	}

	if (from_idle && first_cpu &&
		(cluster->last_level == cluster->default_level))
		update_cluster_history(&cluster->history, cluster->last_level);

	if (!first_cpu || cluster->last_level == cluster->default_level)
		goto unlock_return;

	if (cluster->stats->sleep_time)
		cluster->stats->sleep_time = end_time -
			cluster->stats->sleep_time;
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);

	level = &cluster->levels[cluster->last_level];

	if (level->notify_rpm)
		system_sleep_exit();

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);

	last_level = cluster->last_level;
	cluster->last_level = cluster->default_level;

	for (i = 0; i < cluster->ndevices; i++)
		level = &cluster->levels[cluster->default_level];

	cluster_notify(cluster, &cluster->levels[last_level], false);

	if (from_idle)
		update_cluster_history(&cluster->history, last_level);

	cluster_unprepare(cluster->parent, &cluster->child_cpus,
			last_level, from_idle, end_time);
unlock_return:
	spin_unlock(&cluster->sync_lock);
}

static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;

	/* Use the broadcast timer for aggregating sleep mode within a
	 * cluster. A broadcast timer could be used in the following
	 * scenarios:
	 * 1) The architected timer HW gets reset during certain low power
	 * modes and the core relies on an external (broadcast) timer to wake
	 * up from sleep. This information is passed through device tree.
	 * 2) The CPU low power mode could trigger a system low power mode.
	 * The low power module relies on the broadcast timer to aggregate the
	 * next wakeup within a cluster, in which case the CPU switches over
	 * to use the broadcast timer.
	 */
	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
		|| (cpu_level->mode ==
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
		|| (cpu_level->is_reset)))
		cpu_pm_enter();

	/*
	 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
	 */
	if (jtag_save_restore)
		msm_jtag_save_state();
}

static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;

	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
		|| (cpu_level->mode ==
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
		|| cpu_level->is_reset))
		cpu_pm_exit();

	/*
	 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
	 */
	if (jtag_save_restore)
		msm_jtag_restore_state();
}

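/*
 * get_cluster_id() - compose the PSCI state id for the cluster hierarchy
 *
 * Walks up from @cluster, OR-ing in each level's psci_id (shifted by the
 * cluster's psci_mode_shift) for clusters that are fully in sync and not at
 * their default level, and counts the affected levels in @aff_lvl.
 */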
int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
{
	int state_id = 0;

	if (!cluster)
		return 0;

	spin_lock(&cluster->sync_lock);

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	state_id |= get_cluster_id(cluster->parent, aff_lvl);

	if (cluster->last_level != cluster->default_level) {
		struct lpm_cluster_level *level
			= &cluster->levels[cluster->last_level];

		state_id |= (level->psci_id & cluster->psci_mode_mask)
					<< cluster->psci_mode_shift;
		(*aff_lvl)++;
	}
unlock_and_return:
	spin_unlock(&cluster->sync_lock);
	return state_id;
}

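/*
 * psci_enter_sleep() - enter the selected low power mode through PSCI
 *
 * Index 0 is plain WFI. For deeper modes, the composed state id (power
 * state, affinity level and the level's psci_id) is handed to
 * arm_cpuidle_suspend(). The broadcast timer is entered and exited around
 * the call for levels that use it from idle. Returns whether the low power
 * mode was actually entered.
 */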
static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
	int affinity_level = 0;
	int state_id = get_cluster_id(cpu->parent, &affinity_level);
	int power_state =
		PSCI_POWER_STATE(cpu->levels[idx].is_reset);
	bool success = false;
	/*
	 * idx = 0 is the default LPM state
	 */
	if (from_idle && cpu->levels[idx].use_bc_timer) {
		if (tick_broadcast_enter())
			return false;
	}

	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		return true;
	}

	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id |= (power_state | affinity_level
			| cpu->levels[idx].psci_id);

	update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, true);
	stop_critical_timings();
	success = !arm_cpuidle_suspend(state_id);
	start_critical_timings();
	update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, true);

	if (from_idle && cpu->levels[idx].use_bc_timer)
		tick_broadcast_exit();

	return success;
}

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);

	if (!cpu)
		return 0;

	return cpu_power_select(dev, cpu);
}

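/*
 * update_history() - record the last idle residency for this CPU
 *
 * Stores the measured residency and the entered index in the per-CPU
 * circular history used by lpm_cpuidle_predict(). If the wakeup came from
 * the history timer, the residency is accumulated into the previous sample
 * instead of starting a new one.
 */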
static void update_history(struct cpuidle_device *dev, int idx)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t tmr = 0;

	if (!lpm_prediction)
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += dev->last_residency;
		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = dev->last_residency;

	history->mode[history->hptr] = idx;

	trace_cpu_pred_hist(history->mode[history->hptr],
		history->resi[history->hptr], history->hptr, tmr);

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	(history->hptr)++;
	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

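/*
 * lpm_cpuidle_enter() - cpuidle driver ->enter hook
 *
 * Prepares the CPU and its cluster hierarchy for the selected level, enters
 * it through psci_enter_sleep(), then unwinds the cluster state, updates the
 * residency history and cancels the prediction timers on exit.
 */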
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	bool success = true;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
	struct power_params *pwr_params;

	pwr_params = &cpu->levels[idx].pwr;

	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);

	trace_cpu_idle_enter(idx);
	lpm_stats_cpu_enter(idx, start_time);

	if (need_resched())
		goto exit;

	success = psci_enter_sleep(cpu, idx, true);

exit:
	end_time = ktime_to_ns(ktime_get());
	lpm_stats_cpu_exit(idx, end_time, success);

	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
	cpu_unprepare(cpu, idx, true);
	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
	end_time = ktime_to_ns(ktime_get()) - start_time;
	do_div(end_time, 1000);
	dev->last_residency = end_time;
	update_history(dev, idx);
	trace_cpu_idle_exit(idx, success);
	local_irq_enable();
	if (lpm_prediction) {
		histtimer_cancel();
		clusttimer_cancel();
	}
	return idx;
}

#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	struct cpuidle_device *device;
	int cpu, ret;

	if (!mask || !drv)
		return -EINVAL;

	drv->cpumask = mask;
	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("Failed to register cpuidle driver %d\n", ret);
		goto failed_driver_register;
	}

	for_each_cpu(cpu, mask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

		ret = cpuidle_register_device(device);
		if (ret) {
			pr_err("Failed to register cpuidle driver for cpu:%u\n",
					cpu);
			goto failed_driver_register;
		}
	}
	return ret;
failed_driver_register:
	for_each_cpu(cpu, mask)
		cpuidle_unregister_driver(drv);
	return ret;
}
#else
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	return cpuidle_register(drv, NULL);
}
#endif

static struct cpuidle_governor lpm_governor = {
	.name = "qcom",
	.rating = 30,
	.select = lpm_cpuidle_select,
	.owner = THIS_MODULE,
};

static int cluster_cpuidle_register(struct lpm_cluster *cl)
{
	int i = 0, ret = 0;
	unsigned int cpu;
	struct lpm_cluster *p = NULL;
	struct lpm_cpu *lpm_cpu;

	if (list_empty(&cl->cpu)) {
		struct lpm_cluster *n;

		list_for_each_entry(n, &cl->child, list) {
			ret = cluster_cpuidle_register(n);
			if (ret)
				break;
		}
		return ret;
	}

	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
		lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
		if (!lpm_cpu->drv)
			return -ENOMEM;

		lpm_cpu->drv->name = "msm_idle";

		for (i = 0; i < lpm_cpu->nlevels; i++) {
			struct cpuidle_state *st = &lpm_cpu->drv->states[i];
			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];

			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
			snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
			st->flags = 0;
			st->exit_latency = cpu_level->pwr.latency_us;
			st->power_usage = cpu_level->pwr.ss_power;
			st->target_residency = 0;
			st->enter = lpm_cpuidle_enter;
		}

		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
		lpm_cpu->drv->safe_state_index = 0;
		for_each_cpu(cpu, &lpm_cpu->related_cpus)
			per_cpu(cpu_lpm, cpu) = lpm_cpu;

		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			if (per_cpu(cpu_lpm, cpu))
				p = per_cpu(cpu_lpm, cpu)->parent;
			while (p) {
				int j;

				spin_lock(&p->sync_lock);
				cpumask_set_cpu(cpu, &p->num_children_in_sync);
				for (j = 0; j < p->nlevels; j++)
					cpumask_copy(
						&p->levels[j].num_cpu_votes,
						&p->num_children_in_sync);
				spin_unlock(&p->sync_lock);
				p = p->parent;
			}
		}
		ret = cpuidle_register_cpu(lpm_cpu->drv,
					&lpm_cpu->related_cpus);

		if (ret) {
			kfree(lpm_cpu->drv);
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * init_lpm - initializes the governor
 */
static int __init init_lpm(void)
{
	return cpuidle_register_governor(&lpm_governor);
}

postcore_initcall(init_lpm);

static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
		struct lpm_cluster *parent)
{
	const char **level_name;
	int i;

	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cpu->nlevels; i++)
		level_name[i] = cpu->levels[i].name;

	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
			parent->stats, &cpu->related_cpus);

	kfree(level_name);
}

static void register_cluster_lpm_stats(struct lpm_cluster *cl,
		struct lpm_cluster *parent)
{
	const char **level_name;
	struct lpm_cluster *child;
	struct lpm_cpu *cpu;
	int i;

	if (!cl)
		return;

	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cl->nlevels; i++)
		level_name[i] = cl->levels[i].level_name;

	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
			cl->nlevels, parent ? parent->stats : NULL, NULL);

	kfree(level_name);

	list_for_each_entry(cpu, &cl->cpu, list) {
		pr_err("%s()\n", __func__);
		register_cpu_lpm_stats(cpu, cl);
	}
	if (!list_empty(&cl->cpu))
		return;

	list_for_each_entry(child, &cl->child, list)
		register_cluster_lpm_stats(child, cl);
}

static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	lpm_stats_suspend_enter();

	return 0;
}

static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	lpm_stats_suspend_exit();
}

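/*
 * lpm_suspend_enter() - system suspend path into the deepest allowed mode
 *
 * Picks the deepest CPU level permitted for suspend, prepares the CPU and
 * cluster hierarchy without the idle-time heuristics, enters it via PSCI and
 * unwinds the state on wakeup.
 */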
static int lpm_suspend_enter(suspend_state_t state)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
	struct lpm_cluster *cluster = lpm_cpu->parent;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
	int idx;

	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {

		if (lpm_cpu_mode_allow(cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return 0;
	}
	cpu_prepare(lpm_cpu, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);
	if (idx > 0)
		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
					0xdeaffeed, false);

	/*
	 * Print the clocks that are enabled during system suspend.
	 * This debug information is useful to know which clocks are
	 * enabled and preventing the system level LPMs (XO and Vmin).
	 */

	psci_enter_sleep(lpm_cpu, idx, true);

	if (idx > 0)
		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
					false);

	cluster_unprepare(cluster, cpumask, idx, false, 0);
	cpu_unprepare(lpm_cpu, idx, false);
	return 0;
}

static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};

static int lpm_probe(struct platform_device *pdev)
{
	int ret;
	int size;
	struct kobject *module_kobj = NULL;

	get_online_cpus();
	lpm_root_node = lpm_of_parse_cluster(pdev);

	if (IS_ERR_OR_NULL(lpm_root_node)) {
		pr_err("%s(): Failed to probe low power modes\n", __func__);
		put_online_cpus();
		return PTR_ERR(lpm_root_node);
	}

	if (print_parsed_dt)
		cluster_dt_walkthrough(lpm_root_node);

	/*
	 * Register the hotplug callbacks before the broadcast timer setup to
	 * prevent a race where a broadcast timer might not be set up for a
	 * core. This is a latent bug in the existing code, but no issues have
	 * been observed, possibly because of how late lpm_levels is
	 * initialized.
	 */
	suspend_set_ops(&lpm_suspend_ops);
	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cluster_timer_init(lpm_root_node);

	size = num_dbg_elements * sizeof(struct lpm_debug);
	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
			&lpm_debug_phys, GFP_KERNEL);
	register_cluster_lpm_stats(lpm_root_node, NULL);

	ret = cluster_cpuidle_register(lpm_root_node);
	put_online_cpus();
	if (ret) {
		pr_err("%s()Failed to register with cpuidle framework\n",
				__func__);
		goto failed;
	}
	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
			"AP_QCOM_SLEEP_STARTING",
			lpm_starting_cpu, lpm_dying_cpu);
	if (ret)
		goto failed;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		ret = -ENOENT;
		goto failed;
	}

	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
	if (ret) {
		pr_err("%s(): Failed to create cluster level nodes\n",
				__func__);
		goto failed;
	}

	return 0;
failed:
	free_cluster_node(lpm_root_node);
	lpm_root_node = NULL;
	return ret;
}

static const struct of_device_id lpm_mtch_tbl[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};

static struct platform_driver lpm_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.of_match_table = lpm_mtch_tbl,
	},
};

static int __init lpm_levels_module_init(void)
{
	int rc;

	rc = platform_driver_register(&lpm_driver);
	if (rc) {
		pr_info("Error registering %s\n", lpm_driver.driver.name);
		goto fail;
	}

fail:
	return rc;
}
late_initcall(lpm_levels_module_init);